aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/arcnet/com20020-pci.c3
-rw-r--r--drivers/net/bonding/bond_3ad.c55
-rw-r--r--drivers/net/bonding/bond_main.c121
-rw-r--r--drivers/net/bonding/bond_options.c6
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/bfin_can.c1
-rw-r--r--drivers/net/can/c_can/c_can.c2
-rw-r--r--drivers/net/can/cc770/cc770.c1
-rw-r--r--drivers/net/can/dev.c5
-rw-r--r--drivers/net/can/flexcan.c2
-rw-r--r--drivers/net/can/janz-ican3.c7
-rw-r--r--drivers/net/can/m_can/m_can.c1
-rw-r--r--drivers/net/can/pch_can.c1
-rw-r--r--drivers/net/can/rcar_can.c1
-rw-r--r--drivers/net/can/softing/softing_main.c1
-rw-r--r--drivers/net/can/spi/mcp251x.c1
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/can/usb/Kconfig22
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/esd_usb2.c1
-rw-r--r--drivers/net/can/usb/kvaser_usb.c723
-rw-r--r--drivers/net/can/usb/peak_usb/Makefile2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_ucan.h222
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c83
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h26
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c1095
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c20
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h13
-rw-r--r--drivers/net/can/usb/usb_8dev.c1
-rw-r--r--drivers/net/dsa/bcm_sf2.c88
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h4
-rw-r--r--drivers/net/dsa/mv88e6131.c3
-rw-r--r--drivers/net/dsa/mv88e6352.c13
-rw-r--r--drivers/net/dsa/mv88e6xxx.c9
-rw-r--r--drivers/net/ethernet/3com/typhoon.c4
-rw-r--r--drivers/net/ethernet/alteon/acenic.c8
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c4
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c32
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c66
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c78
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c203
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c29
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c12
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h31
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c94
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c109
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h3
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c9
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c14
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c29
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.c84
-rw-r--r--drivers/net/ethernet/cadence/macb.h631
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/mc5.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c317
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h41
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h169
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c100
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c1917
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h33
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c1003
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c270
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c1543
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h367
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h3392
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_values.h124
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h101
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h48
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c44
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c57
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c43
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h16
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.c56
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h5
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c21
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c179
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_stats.h5
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.h1
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c40
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h203
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c231
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h218
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c16
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h240
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c951
-rw-r--r--drivers/net/ethernet/freescale/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/fec.h3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c145
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c16
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c95
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h2
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c130
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig9
-rw-r--r--drivers/net/ethernet/hisilicon/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c971
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_mdio.c186
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c2
-rw-r--r--drivers/net/ethernet/intel/Kconfig11
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c20
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c41
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c44
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c15
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c7
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ptp.c3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h152
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c136
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c43
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c149
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c44
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c34
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h108
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c44
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h8
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c112
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c6
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h11
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c157
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c267
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c5
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c118
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c90
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h36
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c499
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h10
-rw-r--r--drivers/net/ethernet/jme.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c294
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c422
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c182
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c144
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c489
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/reset.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c12
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c4
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c3
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c4
-rw-r--r--drivers/net/ethernet/neterion/s2io.c4
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c4
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c25
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c24
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c6
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.c16
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c48
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h30
-rw-r--r--drivers/net/ethernet/rocker/rocker.c177
-rw-r--r--drivers/net/ethernet/rocker/rocker.h21
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c69
-rw-r--r--drivers/net/ethernet/smsc/Kconfig10
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c437
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c113
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h1
-rw-r--r--drivers/net/ethernet/sun/niu.c3
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c90
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c4
-rw-r--r--drivers/net/ethernet/ti/Kconfig25
-rw-r--r--drivers/net/ethernet/ti/Makefile11
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c55
-rw-r--r--drivers/net/ethernet/ti/cpsw.c111
-rw-r--r--drivers/net/ethernet/ti/cpsw.h2
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c26
-rw-r--r--drivers/net/ethernet/ti/cpts.c5
-rw-r--r--drivers/net/ethernet/ti/cpts.h1
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c56
-rw-r--r--drivers/net/ethernet/ti/netcp.h229
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2149
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c2159
-rw-r--r--drivers/net/ethernet/ti/netcp_sgmii.c131
-rw-r--r--drivers/net/ethernet/ti/netcp_xgbepcsr.c501
-rw-r--r--drivers/net/ethernet/ti/tlan.c14
-rw-r--r--drivers/net/ethernet/via/via-rhine.c9
-rw-r--r--drivers/net/ethernet/via/via-velocity.c4
-rw-r--r--drivers/net/fddi/skfp/smt.c12
-rw-r--r--drivers/net/hyperv/netvsc.c11
-rw-r--r--drivers/net/hyperv/rndis_filter.c24
-rw-r--r--drivers/net/ieee802154/at86rf230.c82
-rw-r--r--drivers/net/ieee802154/cc2520.c37
-rw-r--r--drivers/net/ieee802154/mrf24j40.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c2
-rw-r--r--drivers/net/irda/ali-ircc.c11
-rw-r--r--drivers/net/irda/ali-ircc.h5
-rw-r--r--drivers/net/irda/au1k_ir.c3
-rw-r--r--drivers/net/irda/irda-usb.c10
-rw-r--r--drivers/net/irda/irda-usb.h5
-rw-r--r--drivers/net/irda/kingsun-sir.c3
-rw-r--r--drivers/net/irda/ks959-sir.c3
-rw-r--r--drivers/net/irda/mcs7780.c2
-rw-r--r--drivers/net/irda/mcs7780.h1
-rw-r--r--drivers/net/irda/nsc-ircc.c7
-rw-r--r--drivers/net/irda/nsc-ircc.h5
-rw-r--r--drivers/net/irda/sa1100_ir.c2
-rw-r--r--drivers/net/irda/stir4200.c16
-rw-r--r--drivers/net/irda/via-ircc.h4
-rw-r--r--drivers/net/irda/vlsi_ir.c46
-rw-r--r--drivers/net/irda/vlsi_ir.h2
-rw-r--r--drivers/net/macvlan.c6
-rw-r--r--drivers/net/macvtap.c6
-rw-r--r--drivers/net/mii.c12
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/amd-xgbe-phy.c981
-rw-r--r--drivers/net/phy/fixed_phy.c2
-rw-r--r--drivers/net/phy/mdio_bus.c14
-rw-r--r--drivers/net/phy/phy.c3
-rw-r--r--drivers/net/phy/phy_device.c22
-rw-r--r--drivers/net/team/team.c12
-rw-r--r--drivers/net/tun.c37
-rw-r--r--drivers/net/usb/hso.c106
-rw-r--r--drivers/net/usb/r8152.c229
-rw-r--r--drivers/net/usb/usbnet.c17
-rw-r--r--drivers/net/veth.c9
-rw-r--r--drivers/net/virtio_net.c6
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h3
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c54
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c29
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h6
-rw-r--r--drivers/net/vxlan.c440
-rw-r--r--drivers/net/wireless/adm8211.c1
-rw-r--r--drivers/net/wireless/ath/ath.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile6
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c322
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h61
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c122
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h11
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c243
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h87
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c402
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c99
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c58
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h136
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c666
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c170
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h25
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c244
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.h58
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h68
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h1064
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c2696
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h1444
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2238
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h449
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c16
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c80
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c61
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c61
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c47
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_wow.c315
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar956x_initvals.h1046
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h15
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c263
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c53
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h40
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c90
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h129
-rw-r--r--drivers/net/wireless/ath/ath9k/reg_wow.h128
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c228
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c83
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c12
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c6
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c16
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c73
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c83
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.h9
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h20
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig9
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c179
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c164
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c46
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c109
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c205
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c15
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c65
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c277
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c151
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h158
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h183
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.c12
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform_msm.c257
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c239
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h70
-rw-r--r--drivers/net/wireless/atmel.c12
-rw-r--r--drivers/net/wireless/b43/Kconfig9
-rw-r--r--drivers/net/wireless/b43/Makefile1
-rw-r--r--drivers/net/wireless/b43/b43.h3
-rw-r--r--drivers/net/wireless/b43/main.c71
-rw-r--r--drivers/net/wireless/b43/phy_ac.c92
-rw-r--r--drivers/net/wireless/b43/phy_ac.h38
-rw-r--r--drivers/net/wireless/b43/phy_common.c9
-rw-r--r--drivers/net/wireless/b43/phy_common.h2
-rw-r--r--drivers/net/wireless/b43legacy/radio.c19
-rw-r--r--drivers/net/wireless/b43legacy/radio.h1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c90
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bus.h24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c227
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.c15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/common.c34
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/common.h (renamed from drivers/net/wireless/ath/wil6210/wil_platform_msm.h)24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/commonring.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/core.c42
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/core.h34
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/firmware.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/flowring.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h55
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c54
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/pcie.c12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio.c178
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio.h12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/debug.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c32
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h12
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h4
-rw-r--r--drivers/net/wireless/cw1200/fwio.c40
-rw-r--r--drivers/net/wireless/cw1200/main.c6
-rw-r--r--drivers/net/wireless/cw1200/pm.c5
-rw-r--r--drivers/net/wireless/cw1200/queue.c4
-rw-r--r--drivers/net/wireless/cw1200/scan.c8
-rw-r--r--drivers/net/wireless/cw1200/sta.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c4
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c9
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c31
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c31
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c88
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h43
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h52
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scd.h41
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h50
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex_legacy.c20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h35
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c51
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c247
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h20
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h40
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h277
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h39
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h301
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c117
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c24
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c362
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h88
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c83
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c551
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h53
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c68
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c44
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tdls.c63
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c79
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h18
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c78
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c100
-rw-r--r--drivers/net/wireless/libertas/cfg.c12
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c33
-rw-r--r--drivers/net/wireless/mwifiex/11h.c198
-rw-r--r--drivers/net/wireless/mwifiex/11n.c6
-rw-r--r--drivers/net/wireless/mwifiex/11n.h14
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c15
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c16
-rw-r--r--drivers/net/wireless/mwifiex/Makefile2
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c951
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c22
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c46
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c281
-rw-r--r--drivers/net/wireless/mwifiex/decl.h55
-rw-r--r--drivers/net/wireless/mwifiex/ethtool.c16
-rw-r--r--drivers/net/wireless/mwifiex/fw.h61
-rw-r--r--drivers/net/wireless/mwifiex/ie.c89
-rw-r--r--drivers/net/wireless/mwifiex/init.c35
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h11
-rw-r--r--drivers/net/wireless/mwifiex/main.c147
-rw-r--r--drivers/net/wireless/mwifiex/main.h84
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c7
-rw-r--r--drivers/net/wireless/mwifiex/pcie.h3
-rw-r--r--drivers/net/wireless/mwifiex/scan.c16
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c111
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h49
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c24
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c7
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c18
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c38
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c9
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c28
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c35
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c2
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c70
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c50
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c28
-rw-r--r--drivers/net/wireless/mwifiex/usb.c27
-rw-r--r--drivers/net/wireless/mwifiex/usb.h11
-rw-r--r--drivers/net/wireless/mwifiex/util.c222
-rw-r--r--drivers/net/wireless/mwifiex/util.h20
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c3
-rw-r--r--drivers/net/wireless/mwl8k.c12
-rw-r--r--drivers/net/wireless/orinoco/Kconfig3
-rw-r--r--drivers/net/wireless/orinoco/main.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c4
-rw-r--r--drivers/net/wireless/p54/eeprom.c6
-rw-r--r--drivers/net/wireless/p54/fwio.c9
-rw-r--r--drivers/net/wireless/p54/main.c10
-rw-r--r--drivers/net/wireless/p54/p54pci.c7
-rw-r--r--drivers/net/wireless/p54/txrx.c12
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c18
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c18
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c8
-rw-r--r--drivers/net/wireless/rtlwifi/base.c156
-rw-r--r--drivers/net/wireless/rtlwifi/base.h4
-rw-r--r--drivers/net/wireless/rtlwifi/core.c72
-rw-r--r--drivers/net/wireless/rtlwifi/core.h42
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c31
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/dm.c36
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/dm.h41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c162
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c45
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h38
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.h13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c165
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c33
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.h38
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.c17
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c27
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/dm.c55
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/dm.h16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/fw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/hw.c166
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/reg.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/trx.c200
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/trx.h12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/def.h8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.h28
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c23
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.c42
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.h38
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c162
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/dm.c55
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/dm.h33
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/phy.c25
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/phy.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/trx.c162
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/def.h54
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/dm.c58
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/dm.h41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/sw.c74
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/trx.c232
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h99
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c5
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c4
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.c88
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.h46
-rw-r--r--drivers/net/wireless/ti/wl18xx/cmd.c93
-rw-r--r--drivers/net/wireless/ti/wl18xx/cmd.h27
-rw-r--r--drivers/net/wireless/ti/wl18xx/conf.h23
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c43
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c21
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.h14
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c37
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h8
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h7
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c9
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h48
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c405
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h12
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h7
-rw-r--r--drivers/net/xen-netback/common.h1
-rw-r--r--drivers/net/xen-netback/interface.c2
-rw-r--r--drivers/net/xen-netback/netback.c107
-rw-r--r--drivers/net/xen-netfront.c258
636 files changed, 44628 insertions, 13580 deletions
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 6c99ff0b0bdd..945f532078e9 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -78,6 +78,9 @@ static int com20020pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
78 78
79 priv = devm_kzalloc(&pdev->dev, sizeof(struct com20020_priv), 79 priv = devm_kzalloc(&pdev->dev, sizeof(struct com20020_priv),
80 GFP_KERNEL); 80 GFP_KERNEL);
81 if (!priv)
82 return -ENOMEM;
83
81 ci = (struct com20020_pci_card_info *)id->driver_data; 84 ci = (struct com20020_pci_card_info *)id->driver_data;
82 priv->ci = ci; 85 priv->ci = ci;
83 86
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 8baa87df1738..cfc4a9c1000a 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -467,11 +467,14 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
467 /* set the partner sync. to on if the partner is sync, 467 /* set the partner sync. to on if the partner is sync,
468 * and the port is matched 468 * and the port is matched
469 */ 469 */
470 if ((port->sm_vars & AD_PORT_MATCHED) 470 if ((port->sm_vars & AD_PORT_MATCHED) &&
471 && (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) 471 (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) {
472 partner->port_state |= AD_STATE_SYNCHRONIZATION; 472 partner->port_state |= AD_STATE_SYNCHRONIZATION;
473 else 473 pr_debug("%s partner sync=1\n", port->slave->dev->name);
474 } else {
474 partner->port_state &= ~AD_STATE_SYNCHRONIZATION; 475 partner->port_state &= ~AD_STATE_SYNCHRONIZATION;
476 pr_debug("%s partner sync=0\n", port->slave->dev->name);
477 }
475 } 478 }
476} 479}
477 480
@@ -726,6 +729,8 @@ static inline void __update_lacpdu_from_port(struct port *port)
726 lacpdu->actor_port_priority = htons(port->actor_port_priority); 729 lacpdu->actor_port_priority = htons(port->actor_port_priority);
727 lacpdu->actor_port = htons(port->actor_port_number); 730 lacpdu->actor_port = htons(port->actor_port_number);
728 lacpdu->actor_state = port->actor_oper_port_state; 731 lacpdu->actor_state = port->actor_oper_port_state;
732 pr_debug("update lacpdu: %s, actor port state %x\n",
733 port->slave->dev->name, port->actor_oper_port_state);
729 734
730 /* lacpdu->reserved_3_1 initialized 735 /* lacpdu->reserved_3_1 initialized
731 * lacpdu->tlv_type_partner_info initialized 736 * lacpdu->tlv_type_partner_info initialized
@@ -898,7 +903,9 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
898 if ((port->sm_vars & AD_PORT_SELECTED) && 903 if ((port->sm_vars & AD_PORT_SELECTED) &&
899 (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) && 904 (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) &&
900 !__check_agg_selection_timer(port)) { 905 !__check_agg_selection_timer(port)) {
901 port->sm_mux_state = AD_MUX_COLLECTING_DISTRIBUTING; 906 if (port->aggregator->is_active)
907 port->sm_mux_state =
908 AD_MUX_COLLECTING_DISTRIBUTING;
902 } else if (!(port->sm_vars & AD_PORT_SELECTED) || 909 } else if (!(port->sm_vars & AD_PORT_SELECTED) ||
903 (port->sm_vars & AD_PORT_STANDBY)) { 910 (port->sm_vars & AD_PORT_STANDBY)) {
904 /* if UNSELECTED or STANDBY */ 911 /* if UNSELECTED or STANDBY */
@@ -910,12 +917,16 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
910 */ 917 */
911 __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator)); 918 __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
912 port->sm_mux_state = AD_MUX_DETACHED; 919 port->sm_mux_state = AD_MUX_DETACHED;
920 } else if (port->aggregator->is_active) {
921 port->actor_oper_port_state |=
922 AD_STATE_SYNCHRONIZATION;
913 } 923 }
914 break; 924 break;
915 case AD_MUX_COLLECTING_DISTRIBUTING: 925 case AD_MUX_COLLECTING_DISTRIBUTING:
916 if (!(port->sm_vars & AD_PORT_SELECTED) || 926 if (!(port->sm_vars & AD_PORT_SELECTED) ||
917 (port->sm_vars & AD_PORT_STANDBY) || 927 (port->sm_vars & AD_PORT_STANDBY) ||
918 !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION)) { 928 !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) ||
929 !(port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) {
919 port->sm_mux_state = AD_MUX_ATTACHED; 930 port->sm_mux_state = AD_MUX_ATTACHED;
920 } else { 931 } else {
921 /* if port state hasn't changed make 932 /* if port state hasn't changed make
@@ -937,8 +948,10 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
937 948
938 /* check if the state machine was changed */ 949 /* check if the state machine was changed */
939 if (port->sm_mux_state != last_state) { 950 if (port->sm_mux_state != last_state) {
940 pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n", 951 pr_debug("Mux Machine: Port=%d (%s), Last State=%d, Curr State=%d\n",
941 port->actor_port_number, last_state, 952 port->actor_port_number,
953 port->slave->dev->name,
954 last_state,
942 port->sm_mux_state); 955 port->sm_mux_state);
943 switch (port->sm_mux_state) { 956 switch (port->sm_mux_state) {
944 case AD_MUX_DETACHED: 957 case AD_MUX_DETACHED:
@@ -953,7 +966,12 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
953 port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0); 966 port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0);
954 break; 967 break;
955 case AD_MUX_ATTACHED: 968 case AD_MUX_ATTACHED:
956 port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION; 969 if (port->aggregator->is_active)
970 port->actor_oper_port_state |=
971 AD_STATE_SYNCHRONIZATION;
972 else
973 port->actor_oper_port_state &=
974 ~AD_STATE_SYNCHRONIZATION;
957 port->actor_oper_port_state &= ~AD_STATE_COLLECTING; 975 port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
958 port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING; 976 port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
959 ad_disable_collecting_distributing(port, 977 ad_disable_collecting_distributing(port,
@@ -963,6 +981,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
963 case AD_MUX_COLLECTING_DISTRIBUTING: 981 case AD_MUX_COLLECTING_DISTRIBUTING:
964 port->actor_oper_port_state |= AD_STATE_COLLECTING; 982 port->actor_oper_port_state |= AD_STATE_COLLECTING;
965 port->actor_oper_port_state |= AD_STATE_DISTRIBUTING; 983 port->actor_oper_port_state |= AD_STATE_DISTRIBUTING;
984 port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
966 ad_enable_collecting_distributing(port, 985 ad_enable_collecting_distributing(port,
967 update_slave_arr); 986 update_slave_arr);
968 port->ntt = true; 987 port->ntt = true;
@@ -1044,8 +1063,10 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1044 1063
1045 /* check if the State machine was changed or new lacpdu arrived */ 1064 /* check if the State machine was changed or new lacpdu arrived */
1046 if ((port->sm_rx_state != last_state) || (lacpdu)) { 1065 if ((port->sm_rx_state != last_state) || (lacpdu)) {
1047 pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n", 1066 pr_debug("Rx Machine: Port=%d (%s), Last State=%d, Curr State=%d\n",
1048 port->actor_port_number, last_state, 1067 port->actor_port_number,
1068 port->slave->dev->name,
1069 last_state,
1049 port->sm_rx_state); 1070 port->sm_rx_state);
1050 switch (port->sm_rx_state) { 1071 switch (port->sm_rx_state) {
1051 case AD_RX_INITIALIZE: 1072 case AD_RX_INITIALIZE:
@@ -1394,6 +1415,9 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
1394 1415
1395 aggregator = __get_first_agg(port); 1416 aggregator = __get_first_agg(port);
1396 ad_agg_selection_logic(aggregator, update_slave_arr); 1417 ad_agg_selection_logic(aggregator, update_slave_arr);
1418
1419 if (!port->aggregator->is_active)
1420 port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
1397} 1421}
1398 1422
1399/* Decide if "agg" is a better choice for the new active aggregator that 1423/* Decide if "agg" is a better choice for the new active aggregator that
@@ -2195,8 +2219,10 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
2195 switch (lacpdu->subtype) { 2219 switch (lacpdu->subtype) {
2196 case AD_TYPE_LACPDU: 2220 case AD_TYPE_LACPDU:
2197 ret = RX_HANDLER_CONSUMED; 2221 ret = RX_HANDLER_CONSUMED;
2198 netdev_dbg(slave->bond->dev, "Received LACPDU on port %d\n", 2222 netdev_dbg(slave->bond->dev,
2199 port->actor_port_number); 2223 "Received LACPDU on port %d slave %s\n",
2224 port->actor_port_number,
2225 slave->dev->name);
2200 /* Protect against concurrent state machines */ 2226 /* Protect against concurrent state machines */
2201 spin_lock(&slave->bond->mode_lock); 2227 spin_lock(&slave->bond->mode_lock);
2202 ad_rx_machine(lacpdu, port); 2228 ad_rx_machine(lacpdu, port);
@@ -2288,7 +2314,10 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
2288 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS; 2314 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
2289 port->actor_oper_port_key = port->actor_admin_port_key |= 2315 port->actor_oper_port_key = port->actor_admin_port_key |=
2290 __get_duplex(port); 2316 __get_duplex(port);
2291 netdev_dbg(slave->bond->dev, "Port %d changed duplex\n", port->actor_port_number); 2317 netdev_dbg(slave->bond->dev, "Port %d slave %s changed duplex\n",
2318 port->actor_port_number, slave->dev->name);
2319 if (port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)
2320 port->sm_vars |= AD_PORT_LACP_ENABLED;
2292 /* there is no need to reselect a new aggregator, just signal the 2321 /* there is no need to reselect a new aggregator, just signal the
2293 * state machines to reinitialize 2322 * state machines to reinitialize
2294 */ 2323 */
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0dceba1a2ba1..b979c265fc51 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -77,6 +77,7 @@
77#include <net/pkt_sched.h> 77#include <net/pkt_sched.h>
78#include <linux/rculist.h> 78#include <linux/rculist.h>
79#include <net/flow_keys.h> 79#include <net/flow_keys.h>
80#include <net/switchdev.h>
80#include <net/bonding.h> 81#include <net/bonding.h>
81#include <net/bond_3ad.h> 82#include <net/bond_3ad.h>
82#include <net/bond_alb.h> 83#include <net/bond_alb.h>
@@ -334,7 +335,7 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
334 * 335 *
335 * Returns zero if carrier state does not change, nonzero if it does. 336 * Returns zero if carrier state does not change, nonzero if it does.
336 */ 337 */
337static int bond_set_carrier(struct bonding *bond) 338int bond_set_carrier(struct bonding *bond)
338{ 339{
339 struct list_head *iter; 340 struct list_head *iter;
340 struct slave *slave; 341 struct slave *slave;
@@ -789,7 +790,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
789 } 790 }
790 791
791 new_active->delay = 0; 792 new_active->delay = 0;
792 new_active->link = BOND_LINK_UP; 793 bond_set_slave_link_state(new_active, BOND_LINK_UP);
793 794
794 if (BOND_MODE(bond) == BOND_MODE_8023AD) 795 if (BOND_MODE(bond) == BOND_MODE_8023AD)
795 bond_3ad_handle_link_change(new_active, BOND_LINK_UP); 796 bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
@@ -979,7 +980,11 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
979 netdev_features_t mask; 980 netdev_features_t mask;
980 struct slave *slave; 981 struct slave *slave;
981 982
982 mask = features; 983 /* If any slave has the offload feature flag set,
984 * set the offload flag on the bond.
985 */
986 mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
987
983 features &= ~NETIF_F_ONE_FOR_ALL; 988 features &= ~NETIF_F_ONE_FOR_ALL;
984 features |= NETIF_F_ALL_FOR_ALL; 989 features |= NETIF_F_ALL_FOR_ALL;
985 990
@@ -998,7 +1003,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
998 NETIF_F_HIGHDMA | NETIF_F_LRO) 1003 NETIF_F_HIGHDMA | NETIF_F_LRO)
999 1004
1000#define BOND_ENC_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\ 1005#define BOND_ENC_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\
1001 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL) 1006 NETIF_F_TSO)
1002 1007
1003static void bond_compute_features(struct bonding *bond) 1008static void bond_compute_features(struct bonding *bond)
1004{ 1009{
@@ -1034,7 +1039,7 @@ static void bond_compute_features(struct bonding *bond)
1034 1039
1035done: 1040done:
1036 bond_dev->vlan_features = vlan_features; 1041 bond_dev->vlan_features = vlan_features;
1037 bond_dev->hw_enc_features = enc_features; 1042 bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
1038 bond_dev->hard_header_len = max_hard_header_len; 1043 bond_dev->hard_header_len = max_hard_header_len;
1039 bond_dev->gso_max_segs = gso_max_segs; 1044 bond_dev->gso_max_segs = gso_max_segs;
1040 netif_set_gso_max_size(bond_dev, gso_max_size); 1045 netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -1176,6 +1181,56 @@ static void bond_free_slave(struct slave *slave)
1176 kfree(slave); 1181 kfree(slave);
1177} 1182}
1178 1183
1184static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
1185{
1186 info->bond_mode = BOND_MODE(bond);
1187 info->miimon = bond->params.miimon;
1188 info->num_slaves = bond->slave_cnt;
1189}
1190
1191static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
1192{
1193 strcpy(info->slave_name, slave->dev->name);
1194 info->link = slave->link;
1195 info->state = bond_slave_state(slave);
1196 info->link_failure_count = slave->link_failure_count;
1197}
1198
1199static void bond_netdev_notify(struct net_device *dev,
1200 struct netdev_bonding_info *info)
1201{
1202 rtnl_lock();
1203 netdev_bonding_info_change(dev, info);
1204 rtnl_unlock();
1205}
1206
1207static void bond_netdev_notify_work(struct work_struct *_work)
1208{
1209 struct netdev_notify_work *w =
1210 container_of(_work, struct netdev_notify_work, work.work);
1211
1212 bond_netdev_notify(w->dev, &w->bonding_info);
1213 dev_put(w->dev);
1214 kfree(w);
1215}
1216
1217void bond_queue_slave_event(struct slave *slave)
1218{
1219 struct bonding *bond = slave->bond;
1220 struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
1221
1222 if (!nnw)
1223 return;
1224
1225 dev_hold(slave->dev);
1226 nnw->dev = slave->dev;
1227 bond_fill_ifslave(slave, &nnw->bonding_info.slave);
1228 bond_fill_ifbond(bond, &nnw->bonding_info.master);
1229 INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
1230
1231 queue_delayed_work(slave->bond->wq, &nnw->work, 0);
1232}
1233
1179/* enslave device <slave> to bond device <master> */ 1234/* enslave device <slave> to bond device <master> */
1180int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) 1235int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1181{ 1236{
@@ -1439,19 +1494,22 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1439 if (bond->params.miimon) { 1494 if (bond->params.miimon) {
1440 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) { 1495 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
1441 if (bond->params.updelay) { 1496 if (bond->params.updelay) {
1442 new_slave->link = BOND_LINK_BACK; 1497 bond_set_slave_link_state(new_slave,
1498 BOND_LINK_BACK);
1443 new_slave->delay = bond->params.updelay; 1499 new_slave->delay = bond->params.updelay;
1444 } else { 1500 } else {
1445 new_slave->link = BOND_LINK_UP; 1501 bond_set_slave_link_state(new_slave,
1502 BOND_LINK_UP);
1446 } 1503 }
1447 } else { 1504 } else {
1448 new_slave->link = BOND_LINK_DOWN; 1505 bond_set_slave_link_state(new_slave, BOND_LINK_DOWN);
1449 } 1506 }
1450 } else if (bond->params.arp_interval) { 1507 } else if (bond->params.arp_interval) {
1451 new_slave->link = (netif_carrier_ok(slave_dev) ? 1508 bond_set_slave_link_state(new_slave,
1452 BOND_LINK_UP : BOND_LINK_DOWN); 1509 (netif_carrier_ok(slave_dev) ?
1510 BOND_LINK_UP : BOND_LINK_DOWN));
1453 } else { 1511 } else {
1454 new_slave->link = BOND_LINK_UP; 1512 bond_set_slave_link_state(new_slave, BOND_LINK_UP);
1455 } 1513 }
1456 1514
1457 if (new_slave->link != BOND_LINK_DOWN) 1515 if (new_slave->link != BOND_LINK_DOWN)
@@ -1567,6 +1625,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1567 new_slave->link != BOND_LINK_DOWN ? "an up" : "a down"); 1625 new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
1568 1626
1569 /* enslave is successful */ 1627 /* enslave is successful */
1628 bond_queue_slave_event(new_slave);
1570 return 0; 1629 return 0;
1571 1630
1572/* Undo stages on error */ 1631/* Undo stages on error */
@@ -1816,11 +1875,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
1816static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) 1875static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
1817{ 1876{
1818 struct bonding *bond = netdev_priv(bond_dev); 1877 struct bonding *bond = netdev_priv(bond_dev);
1819 1878 bond_fill_ifbond(bond, info);
1820 info->bond_mode = BOND_MODE(bond);
1821 info->miimon = bond->params.miimon;
1822 info->num_slaves = bond->slave_cnt;
1823
1824 return 0; 1879 return 0;
1825} 1880}
1826 1881
@@ -1834,10 +1889,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
1834 bond_for_each_slave(bond, slave, iter) { 1889 bond_for_each_slave(bond, slave, iter) {
1835 if (i++ == (int)info->slave_id) { 1890 if (i++ == (int)info->slave_id) {
1836 res = 0; 1891 res = 0;
1837 strcpy(info->slave_name, slave->dev->name); 1892 bond_fill_ifslave(slave, info);
1838 info->link = slave->link;
1839 info->state = bond_slave_state(slave);
1840 info->link_failure_count = slave->link_failure_count;
1841 break; 1893 break;
1842 } 1894 }
1843 } 1895 }
@@ -1867,7 +1919,7 @@ static int bond_miimon_inspect(struct bonding *bond)
1867 if (link_state) 1919 if (link_state)
1868 continue; 1920 continue;
1869 1921
1870 slave->link = BOND_LINK_FAIL; 1922 bond_set_slave_link_state(slave, BOND_LINK_FAIL);
1871 slave->delay = bond->params.downdelay; 1923 slave->delay = bond->params.downdelay;
1872 if (slave->delay) { 1924 if (slave->delay) {
1873 netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n", 1925 netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
@@ -1882,7 +1934,7 @@ static int bond_miimon_inspect(struct bonding *bond)
1882 case BOND_LINK_FAIL: 1934 case BOND_LINK_FAIL:
1883 if (link_state) { 1935 if (link_state) {
1884 /* recovered before downdelay expired */ 1936 /* recovered before downdelay expired */
1885 slave->link = BOND_LINK_UP; 1937 bond_set_slave_link_state(slave, BOND_LINK_UP);
1886 slave->last_link_up = jiffies; 1938 slave->last_link_up = jiffies;
1887 netdev_info(bond->dev, "link status up again after %d ms for interface %s\n", 1939 netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
1888 (bond->params.downdelay - slave->delay) * 1940 (bond->params.downdelay - slave->delay) *
@@ -1904,7 +1956,7 @@ static int bond_miimon_inspect(struct bonding *bond)
1904 if (!link_state) 1956 if (!link_state)
1905 continue; 1957 continue;
1906 1958
1907 slave->link = BOND_LINK_BACK; 1959 bond_set_slave_link_state(slave, BOND_LINK_BACK);
1908 slave->delay = bond->params.updelay; 1960 slave->delay = bond->params.updelay;
1909 1961
1910 if (slave->delay) { 1962 if (slave->delay) {
@@ -1917,7 +1969,8 @@ static int bond_miimon_inspect(struct bonding *bond)
1917 /*FALLTHRU*/ 1969 /*FALLTHRU*/
1918 case BOND_LINK_BACK: 1970 case BOND_LINK_BACK:
1919 if (!link_state) { 1971 if (!link_state) {
1920 slave->link = BOND_LINK_DOWN; 1972 bond_set_slave_link_state(slave,
1973 BOND_LINK_DOWN);
1921 netdev_info(bond->dev, "link status down again after %d ms for interface %s\n", 1974 netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
1922 (bond->params.updelay - slave->delay) * 1975 (bond->params.updelay - slave->delay) *
1923 bond->params.miimon, 1976 bond->params.miimon,
@@ -1955,7 +2008,7 @@ static void bond_miimon_commit(struct bonding *bond)
1955 continue; 2008 continue;
1956 2009
1957 case BOND_LINK_UP: 2010 case BOND_LINK_UP:
1958 slave->link = BOND_LINK_UP; 2011 bond_set_slave_link_state(slave, BOND_LINK_UP);
1959 slave->last_link_up = jiffies; 2012 slave->last_link_up = jiffies;
1960 2013
1961 primary = rtnl_dereference(bond->primary_slave); 2014 primary = rtnl_dereference(bond->primary_slave);
@@ -1995,7 +2048,7 @@ static void bond_miimon_commit(struct bonding *bond)
1995 if (slave->link_failure_count < UINT_MAX) 2048 if (slave->link_failure_count < UINT_MAX)
1996 slave->link_failure_count++; 2049 slave->link_failure_count++;
1997 2050
1998 slave->link = BOND_LINK_DOWN; 2051 bond_set_slave_link_state(slave, BOND_LINK_DOWN);
1999 2052
2000 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || 2053 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
2001 BOND_MODE(bond) == BOND_MODE_8023AD) 2054 BOND_MODE(bond) == BOND_MODE_8023AD)
@@ -2578,7 +2631,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
2578 struct slave *current_arp_slave; 2631 struct slave *current_arp_slave;
2579 2632
2580 current_arp_slave = rtnl_dereference(bond->current_arp_slave); 2633 current_arp_slave = rtnl_dereference(bond->current_arp_slave);
2581 slave->link = BOND_LINK_UP; 2634 bond_set_slave_link_state(slave, BOND_LINK_UP);
2582 if (current_arp_slave) { 2635 if (current_arp_slave) {
2583 bond_set_slave_inactive_flags( 2636 bond_set_slave_inactive_flags(
2584 current_arp_slave, 2637 current_arp_slave,
@@ -2601,7 +2654,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
2601 if (slave->link_failure_count < UINT_MAX) 2654 if (slave->link_failure_count < UINT_MAX)
2602 slave->link_failure_count++; 2655 slave->link_failure_count++;
2603 2656
2604 slave->link = BOND_LINK_DOWN; 2657 bond_set_slave_link_state(slave, BOND_LINK_DOWN);
2605 bond_set_slave_inactive_flags(slave, 2658 bond_set_slave_inactive_flags(slave,
2606 BOND_SLAVE_NOTIFY_NOW); 2659 BOND_SLAVE_NOTIFY_NOW);
2607 2660
@@ -2680,7 +2733,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
2680 * up when it is actually down 2733 * up when it is actually down
2681 */ 2734 */
2682 if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { 2735 if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
2683 slave->link = BOND_LINK_DOWN; 2736 bond_set_slave_link_state(slave, BOND_LINK_DOWN);
2684 if (slave->link_failure_count < UINT_MAX) 2737 if (slave->link_failure_count < UINT_MAX)
2685 slave->link_failure_count++; 2738 slave->link_failure_count++;
2686 2739
@@ -2700,7 +2753,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
2700 if (!new_slave) 2753 if (!new_slave)
2701 goto check_state; 2754 goto check_state;
2702 2755
2703 new_slave->link = BOND_LINK_BACK; 2756 bond_set_slave_link_state(new_slave, BOND_LINK_BACK);
2704 bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER); 2757 bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
2705 bond_arp_send_all(bond, new_slave); 2758 bond_arp_send_all(bond, new_slave);
2706 new_slave->last_link_up = jiffies; 2759 new_slave->last_link_up = jiffies;
@@ -3066,7 +3119,7 @@ static int bond_open(struct net_device *bond_dev)
3066 slave != rcu_access_pointer(bond->curr_active_slave)) { 3119 slave != rcu_access_pointer(bond->curr_active_slave)) {
3067 bond_set_slave_inactive_flags(slave, 3120 bond_set_slave_inactive_flags(slave,
3068 BOND_SLAVE_NOTIFY_NOW); 3121 BOND_SLAVE_NOTIFY_NOW);
3069 } else { 3122 } else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
3070 bond_set_slave_active_flags(slave, 3123 bond_set_slave_active_flags(slave,
3071 BOND_SLAVE_NOTIFY_NOW); 3124 BOND_SLAVE_NOTIFY_NOW);
3072 } 3125 }
@@ -3734,7 +3787,7 @@ out:
3734 * usable slave array is formed in the control path. The xmit function 3787 * usable slave array is formed in the control path. The xmit function
3735 * just calculates hash and sends the packet out. 3788 * just calculates hash and sends the packet out.
3736 */ 3789 */
3737int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev) 3790static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
3738{ 3791{
3739 struct bonding *bond = netdev_priv(dev); 3792 struct bonding *bond = netdev_priv(dev);
3740 struct slave *slave; 3793 struct slave *slave;
@@ -3952,6 +4005,8 @@ static const struct net_device_ops bond_netdev_ops = {
3952 .ndo_add_slave = bond_enslave, 4005 .ndo_add_slave = bond_enslave,
3953 .ndo_del_slave = bond_release, 4006 .ndo_del_slave = bond_release,
3954 .ndo_fix_features = bond_fix_features, 4007 .ndo_fix_features = bond_fix_features,
4008 .ndo_bridge_setlink = ndo_dflt_netdev_switch_port_bridge_setlink,
4009 .ndo_bridge_dellink = ndo_dflt_netdev_switch_port_bridge_dellink,
3955}; 4010};
3956 4011
3957static const struct device_type bond_type = { 4012static const struct device_type bond_type = {
@@ -4010,7 +4065,7 @@ void bond_setup(struct net_device *bond_dev)
4010 NETIF_F_HW_VLAN_CTAG_FILTER; 4065 NETIF_F_HW_VLAN_CTAG_FILTER;
4011 4066
4012 bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); 4067 bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
4013 bond_dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; 4068 bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
4014 bond_dev->features |= bond_dev->hw_features; 4069 bond_dev->features |= bond_dev->hw_features;
4015} 4070}
4016 4071
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 1a61cc9b3402..4df28943d222 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -186,7 +186,7 @@ static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
186 { NULL, -1, 0} 186 { NULL, -1, 0}
187}; 187};
188 188
189static const struct bond_option bond_opts[] = { 189static const struct bond_option bond_opts[BOND_OPT_LAST] = {
190 [BOND_OPT_MODE] = { 190 [BOND_OPT_MODE] = {
191 .id = BOND_OPT_MODE, 191 .id = BOND_OPT_MODE,
192 .name = "mode", 192 .name = "mode",
@@ -379,8 +379,7 @@ static const struct bond_option bond_opts[] = {
379 .values = bond_tlb_dynamic_lb_tbl, 379 .values = bond_tlb_dynamic_lb_tbl,
380 .flags = BOND_OPTFLAG_IFDOWN, 380 .flags = BOND_OPTFLAG_IFDOWN,
381 .set = bond_option_tlb_dynamic_lb_set, 381 .set = bond_option_tlb_dynamic_lb_set,
382 }, 382 }
383 { }
384}; 383};
385 384
386/* Searches for an option by name */ 385/* Searches for an option by name */
@@ -1182,6 +1181,7 @@ static int bond_option_min_links_set(struct bonding *bond,
1182 netdev_info(bond->dev, "Setting min links value to %llu\n", 1181 netdev_info(bond->dev, "Setting min links value to %llu\n",
1183 newval->value); 1182 newval->value);
1184 bond->params.min_links = newval->value; 1183 bond->params.min_links = newval->value;
1184 bond_set_carrier(bond);
1185 1185
1186 return 0; 1186 return 0;
1187} 1187}
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index d0c2463b573f..eeb4b8b6b335 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -138,7 +138,6 @@ struct at91_devtype_data {
138 138
139struct at91_priv { 139struct at91_priv {
140 struct can_priv can; /* must be the first member! */ 140 struct can_priv can; /* must be the first member! */
141 struct net_device *dev;
142 struct napi_struct napi; 141 struct napi_struct napi;
143 142
144 void __iomem *reg_base; 143 void __iomem *reg_base;
@@ -1350,7 +1349,6 @@ static int at91_can_probe(struct platform_device *pdev)
1350 priv->can.do_get_berr_counter = at91_get_berr_counter; 1349 priv->can.do_get_berr_counter = at91_get_berr_counter;
1351 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | 1350 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
1352 CAN_CTRLMODE_LISTENONLY; 1351 CAN_CTRLMODE_LISTENONLY;
1353 priv->dev = dev;
1354 priv->reg_base = addr; 1352 priv->reg_base = addr;
1355 priv->devtype_data = *devtype_data; 1353 priv->devtype_data = *devtype_data;
1356 priv->clk = clk; 1354 priv->clk = clk;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 417d50998e31..e7a6363e736b 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -352,6 +352,7 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
352 netdev_dbg(dev, "bus-off mode interrupt\n"); 352 netdev_dbg(dev, "bus-off mode interrupt\n");
353 state = CAN_STATE_BUS_OFF; 353 state = CAN_STATE_BUS_OFF;
354 cf->can_id |= CAN_ERR_BUSOFF; 354 cf->can_id |= CAN_ERR_BUSOFF;
355 priv->can.can_stats.bus_off++;
355 can_bus_off(dev); 356 can_bus_off(dev);
356 } 357 }
357 358
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index c672c4dcffac..041525d2595c 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -869,7 +869,7 @@ static int c_can_handle_state_change(struct net_device *dev,
869 case C_CAN_BUS_OFF: 869 case C_CAN_BUS_OFF:
870 /* bus-off state */ 870 /* bus-off state */
871 priv->can.state = CAN_STATE_BUS_OFF; 871 priv->can.state = CAN_STATE_BUS_OFF;
872 can_bus_off(dev); 872 priv->can.can_stats.bus_off++;
873 break; 873 break;
874 default: 874 default:
875 break; 875 break;
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index c486fe510f37..c11d44984036 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -535,6 +535,7 @@ static int cc770_err(struct net_device *dev, u8 status)
535 cc770_write_reg(priv, control, CTRL_INI); 535 cc770_write_reg(priv, control, CTRL_INI);
536 cf->can_id |= CAN_ERR_BUSOFF; 536 cf->can_id |= CAN_ERR_BUSOFF;
537 priv->can.state = CAN_STATE_BUS_OFF; 537 priv->can.state = CAN_STATE_BUS_OFF;
538 priv->can.can_stats.bus_off++;
538 can_bus_off(dev); 539 can_bus_off(dev);
539 } else if (status & STAT_WARN) { 540 } else if (status & STAT_WARN) {
540 cf->can_id |= CAN_ERR_CRTL; 541 cf->can_id |= CAN_ERR_CRTL;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 847c1f813261..3c82e02e3dae 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -289,9 +289,11 @@ static void can_update_state_error_stats(struct net_device *dev,
289 priv->can_stats.error_passive++; 289 priv->can_stats.error_passive++;
290 break; 290 break;
291 case CAN_STATE_BUS_OFF: 291 case CAN_STATE_BUS_OFF:
292 priv->can_stats.bus_off++;
293 break;
292 default: 294 default:
293 break; 295 break;
294 }; 296 }
295} 297}
296 298
297static int can_tx_state_to_frame(struct net_device *dev, enum can_state state) 299static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
@@ -544,7 +546,6 @@ void can_bus_off(struct net_device *dev)
544 netdev_dbg(dev, "bus-off\n"); 546 netdev_dbg(dev, "bus-off\n");
545 547
546 netif_carrier_off(dev); 548 netif_carrier_off(dev);
547 priv->can_stats.bus_off++;
548 549
549 if (priv->restart_ms) 550 if (priv->restart_ms)
550 mod_timer(&priv->restart_timer, 551 mod_timer(&priv->restart_timer,
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index b1d583ba9674..80c46ad4cee4 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -247,7 +247,6 @@ struct flexcan_devtype_data {
247 247
248struct flexcan_priv { 248struct flexcan_priv {
249 struct can_priv can; 249 struct can_priv can;
250 struct net_device *dev;
251 struct napi_struct napi; 250 struct napi_struct napi;
252 251
253 void __iomem *base; 252 void __iomem *base;
@@ -1220,7 +1219,6 @@ static int flexcan_probe(struct platform_device *pdev)
1220 CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | 1219 CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES |
1221 CAN_CTRLMODE_BERR_REPORTING; 1220 CAN_CTRLMODE_BERR_REPORTING;
1222 priv->base = base; 1221 priv->base = base;
1223 priv->dev = dev;
1224 priv->clk_ipg = clk_ipg; 1222 priv->clk_ipg = clk_ipg;
1225 priv->clk_per = clk_per; 1223 priv->clk_per = clk_per;
1226 priv->pdata = dev_get_platdata(&pdev->dev); 1224 priv->pdata = dev_get_platdata(&pdev->dev);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 1b118394907f..4dd183a3643a 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1008,6 +1008,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
1008 if (status & SR_BS) { 1008 if (status & SR_BS) {
1009 state = CAN_STATE_BUS_OFF; 1009 state = CAN_STATE_BUS_OFF;
1010 cf->can_id |= CAN_ERR_BUSOFF; 1010 cf->can_id |= CAN_ERR_BUSOFF;
1011 mod->can.can_stats.bus_off++;
1011 can_bus_off(dev); 1012 can_bus_off(dev);
1012 } else if (status & SR_ES) { 1013 } else if (status & SR_ES) {
1013 if (rxerr >= 128 || txerr >= 128) 1014 if (rxerr >= 128 || txerr >= 128)
@@ -1678,8 +1679,7 @@ static int ican3_get_berr_counter(const struct net_device *ndev,
1678 if (ret) 1679 if (ret)
1679 return ret; 1680 return ret;
1680 1681
1681 ret = wait_for_completion_timeout(&mod->buserror_comp, HZ); 1682 if (!wait_for_completion_timeout(&mod->buserror_comp, HZ)) {
1682 if (ret == 0) {
1683 netdev_info(mod->ndev, "%s timed out\n", __func__); 1683 netdev_info(mod->ndev, "%s timed out\n", __func__);
1684 return -ETIMEDOUT; 1684 return -ETIMEDOUT;
1685 } 1685 }
@@ -1704,8 +1704,7 @@ static ssize_t ican3_sysfs_show_term(struct device *dev,
1704 if (ret) 1704 if (ret)
1705 return ret; 1705 return ret;
1706 1706
1707 ret = wait_for_completion_timeout(&mod->termination_comp, HZ); 1707 if (!wait_for_completion_timeout(&mod->termination_comp, HZ)) {
1708 if (ret == 0) {
1709 netdev_info(mod->ndev, "%s timed out\n", __func__); 1708 netdev_info(mod->ndev, "%s timed out\n", __func__);
1710 return -ETIMEDOUT; 1709 return -ETIMEDOUT;
1711 } 1710 }
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 244529881be9..2e04b3aeeb37 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -589,6 +589,7 @@ static int m_can_handle_state_change(struct net_device *dev,
589 /* bus-off state */ 589 /* bus-off state */
590 priv->can.state = CAN_STATE_BUS_OFF; 590 priv->can.state = CAN_STATE_BUS_OFF;
591 m_can_disable_all_interrupts(priv); 591 m_can_disable_all_interrupts(priv);
592 priv->can.can_stats.bus_off++;
592 can_bus_off(dev); 593 can_bus_off(dev);
593 break; 594 break;
594 default: 595 default:
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index a67eb01f3028..e187ca783da0 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -505,6 +505,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
505 pch_can_set_rx_all(priv, 0); 505 pch_can_set_rx_all(priv, 0);
506 state = CAN_STATE_BUS_OFF; 506 state = CAN_STATE_BUS_OFF;
507 cf->can_id |= CAN_ERR_BUSOFF; 507 cf->can_id |= CAN_ERR_BUSOFF;
508 priv->can.can_stats.bus_off++;
508 can_bus_off(ndev); 509 can_bus_off(ndev);
509 } 510 }
510 511
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 91cd48ca0efc..7deb80dcbe8c 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -331,6 +331,7 @@ static void rcar_can_error(struct net_device *ndev)
331 priv->can.state = CAN_STATE_BUS_OFF; 331 priv->can.state = CAN_STATE_BUS_OFF;
332 /* Clear interrupt condition */ 332 /* Clear interrupt condition */
333 writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr); 333 writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
334 priv->can.can_stats.bus_off++;
334 can_bus_off(ndev); 335 can_bus_off(ndev);
335 if (skb) 336 if (skb)
336 cf->can_id |= CAN_ERR_BUSOFF; 337 cf->can_id |= CAN_ERR_BUSOFF;
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 2bf98d862302..7621f91a8a20 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -261,6 +261,7 @@ static int softing_handle_1(struct softing *card)
261 ++priv->can.can_stats.error_passive; 261 ++priv->can.can_stats.error_passive;
262 else if (can_state == CAN_STATE_BUS_OFF) { 262 else if (can_state == CAN_STATE_BUS_OFF) {
263 /* this calls can_close_cleanup() */ 263 /* this calls can_close_cleanup() */
264 ++priv->can.can_stats.bus_off;
264 can_bus_off(netdev); 265 can_bus_off(netdev);
265 netif_stop_queue(netdev); 266 netif_stop_queue(netdev);
266 } 267 }
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index c66d699640a9..bf63fee4e743 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -905,6 +905,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
905 if (priv->can.state == CAN_STATE_BUS_OFF) { 905 if (priv->can.state == CAN_STATE_BUS_OFF) {
906 if (priv->can.restart_ms == 0) { 906 if (priv->can.restart_ms == 0) {
907 priv->force_quit = 1; 907 priv->force_quit = 1;
908 priv->can.can_stats.bus_off++;
908 can_bus_off(net); 909 can_bus_off(net);
909 mcp251x_hw_sleep(spi); 910 mcp251x_hw_sleep(spi);
910 break; 911 break;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 9a07eafe554b..e95a9e1a889f 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -715,6 +715,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
715 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR); 715 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
716 /* Disable all interrupts in bus-off to avoid int hog */ 716 /* Disable all interrupts in bus-off to avoid int hog */
717 hecc_write(priv, HECC_CANGIM, 0); 717 hecc_write(priv, HECC_CANGIM, 0);
718 ++priv->can.can_stats.bus_off;
718 can_bus_off(ndev); 719 can_bus_off(ndev);
719 } 720 }
720 721
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index a77db919363c..bcb272f6c68a 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -25,7 +25,7 @@ config CAN_KVASER_USB
25 tristate "Kvaser CAN/USB interface" 25 tristate "Kvaser CAN/USB interface"
26 ---help--- 26 ---help---
27 This driver adds support for Kvaser CAN/USB devices like Kvaser 27 This driver adds support for Kvaser CAN/USB devices like Kvaser
28 Leaf Light. 28 Leaf Light and Kvaser USBcan II.
29 29
30 The driver provides support for the following devices: 30 The driver provides support for the following devices:
31 - Kvaser Leaf Light 31 - Kvaser Leaf Light
@@ -46,6 +46,12 @@ config CAN_KVASER_USB
46 - Kvaser USBcan R 46 - Kvaser USBcan R
47 - Kvaser Leaf Light v2 47 - Kvaser Leaf Light v2
48 - Kvaser Mini PCI Express HS 48 - Kvaser Mini PCI Express HS
49 - Kvaser USBcan II HS/HS
50 - Kvaser USBcan II HS/LS
51 - Kvaser USBcan Rugged ("USBcan Rev B")
52 - Kvaser Memorator HS/HS
53 - Kvaser Memorator HS/LS
54 - Scania VCI2 (if you have the Kvaser logo on top)
49 55
50 If unsure, say N. 56 If unsure, say N.
51 57
@@ -53,10 +59,18 @@ config CAN_KVASER_USB
53 module will be called kvaser_usb. 59 module will be called kvaser_usb.
54 60
55config CAN_PEAK_USB 61config CAN_PEAK_USB
56 tristate "PEAK PCAN-USB/USB Pro interfaces" 62 tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD"
57 ---help--- 63 ---help---
58 This driver supports the PCAN-USB and PCAN-USB Pro adapters 64 This driver supports the PEAK-System Technik USB adapters that enable
59 from PEAK-System Technik (http://www.peak-system.com). 65 access to the CAN bus, with repect to the CAN 2.0b and/or CAN-FD
66 standards, that is:
67
68 PCAN-USB single CAN 2.0b channel USB adapter
69 PCAN-USB Pro dual CAN 2.0b channels USB adapter
70 PCAN-USB FD single CAN-FD channel USB adapter
71 PCAN-USB Pro FD dual CAN-FD channels USB adapter
72
73 (see also http://www.peak-system.com).
60 74
61config CAN_8DEV_USB 75config CAN_8DEV_USB
62 tristate "8 devices USB2CAN interface" 76 tristate "8 devices USB2CAN interface"
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 29d3f0938eb8..9376f5e5b94e 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -347,6 +347,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
347 dev->can.state = CAN_STATE_BUS_OFF; 347 dev->can.state = CAN_STATE_BUS_OFF;
348 cf->can_id |= CAN_ERR_BUSOFF; 348 cf->can_id |= CAN_ERR_BUSOFF;
349 349
350 dev->can.can_stats.bus_off++;
350 can_bus_off(dev->netdev); 351 can_bus_off(dev->netdev);
351 } else if (state & SJA1000_SR_ES) { 352 } else if (state & SJA1000_SR_ES) {
352 dev->can.state = CAN_STATE_ERROR_WARNING; 353 dev->can.state = CAN_STATE_ERROR_WARNING;
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index c063a54ab8dd..bacca0bd89c1 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -250,6 +250,7 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
250 case ESD_BUSSTATE_BUSOFF: 250 case ESD_BUSSTATE_BUSOFF:
251 priv->can.state = CAN_STATE_BUS_OFF; 251 priv->can.state = CAN_STATE_BUS_OFF;
252 cf->can_id |= CAN_ERR_BUSOFF; 252 cf->can_id |= CAN_ERR_BUSOFF;
253 priv->can.can_stats.bus_off++;
253 can_bus_off(priv->netdev); 254 can_bus_off(priv->netdev);
254 break; 255 break;
255 case ESD_BUSSTATE_WARN: 256 case ESD_BUSSTATE_WARN:
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 7af379ca861b..2928f7003041 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -6,10 +6,12 @@
6 * Parts of this driver are based on the following: 6 * Parts of this driver are based on the following:
7 * - Kvaser linux leaf driver (version 4.78) 7 * - Kvaser linux leaf driver (version 4.78)
8 * - CAN driver for esd CAN-USB/2 8 * - CAN driver for esd CAN-USB/2
9 * - Kvaser linux usbcanII driver (version 5.3)
9 * 10 *
10 * Copyright (C) 2002-2006 KVASER AB, Sweden. All rights reserved. 11 * Copyright (C) 2002-2006 KVASER AB, Sweden. All rights reserved.
11 * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh 12 * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
12 * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be> 13 * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
14 * Copyright (C) 2015 Valeo S.A.
13 */ 15 */
14 16
15#include <linux/completion.h> 17#include <linux/completion.h>
@@ -30,8 +32,9 @@
30#define RX_BUFFER_SIZE 3072 32#define RX_BUFFER_SIZE 3072
31#define CAN_USB_CLOCK 8000000 33#define CAN_USB_CLOCK 8000000
32#define MAX_NET_DEVICES 3 34#define MAX_NET_DEVICES 3
35#define MAX_USBCAN_NET_DEVICES 2
33 36
34/* Kvaser USB devices */ 37/* Kvaser Leaf USB devices */
35#define KVASER_VENDOR_ID 0x0bfd 38#define KVASER_VENDOR_ID 0x0bfd
36#define USB_LEAF_DEVEL_PRODUCT_ID 10 39#define USB_LEAF_DEVEL_PRODUCT_ID 10
37#define USB_LEAF_LITE_PRODUCT_ID 11 40#define USB_LEAF_LITE_PRODUCT_ID 11
@@ -56,6 +59,24 @@
56#define USB_LEAF_LITE_V2_PRODUCT_ID 288 59#define USB_LEAF_LITE_V2_PRODUCT_ID 288
57#define USB_MINI_PCIE_HS_PRODUCT_ID 289 60#define USB_MINI_PCIE_HS_PRODUCT_ID 289
58 61
62static inline bool kvaser_is_leaf(const struct usb_device_id *id)
63{
64 return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
65 id->idProduct <= USB_MINI_PCIE_HS_PRODUCT_ID;
66}
67
68/* Kvaser USBCan-II devices */
69#define USB_USBCAN_REVB_PRODUCT_ID 2
70#define USB_VCI2_PRODUCT_ID 3
71#define USB_USBCAN2_PRODUCT_ID 4
72#define USB_MEMORATOR_PRODUCT_ID 5
73
74static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
75{
76 return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
77 id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
78}
79
59/* USB devices features */ 80/* USB devices features */
60#define KVASER_HAS_SILENT_MODE BIT(0) 81#define KVASER_HAS_SILENT_MODE BIT(0)
61#define KVASER_HAS_TXRX_ERRORS BIT(1) 82#define KVASER_HAS_TXRX_ERRORS BIT(1)
@@ -73,7 +94,7 @@
73#define MSG_FLAG_TX_ACK BIT(6) 94#define MSG_FLAG_TX_ACK BIT(6)
74#define MSG_FLAG_TX_REQUEST BIT(7) 95#define MSG_FLAG_TX_REQUEST BIT(7)
75 96
76/* Can states */ 97/* Can states (M16C CxSTRH register) */
77#define M16C_STATE_BUS_RESET BIT(0) 98#define M16C_STATE_BUS_RESET BIT(0)
78#define M16C_STATE_BUS_ERROR BIT(4) 99#define M16C_STATE_BUS_ERROR BIT(4)
79#define M16C_STATE_BUS_PASSIVE BIT(5) 100#define M16C_STATE_BUS_PASSIVE BIT(5)
@@ -98,7 +119,11 @@
98#define CMD_START_CHIP_REPLY 27 119#define CMD_START_CHIP_REPLY 27
99#define CMD_STOP_CHIP 28 120#define CMD_STOP_CHIP 28
100#define CMD_STOP_CHIP_REPLY 29 121#define CMD_STOP_CHIP_REPLY 29
101#define CMD_GET_CARD_INFO2 32 122
123#define CMD_LEAF_GET_CARD_INFO2 32
124#define CMD_USBCAN_RESET_CLOCK 32
125#define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33
126
102#define CMD_GET_CARD_INFO 34 127#define CMD_GET_CARD_INFO 34
103#define CMD_GET_CARD_INFO_REPLY 35 128#define CMD_GET_CARD_INFO_REPLY 35
104#define CMD_GET_SOFTWARE_INFO 38 129#define CMD_GET_SOFTWARE_INFO 38
@@ -108,8 +133,9 @@
108#define CMD_RESET_ERROR_COUNTER 49 133#define CMD_RESET_ERROR_COUNTER 49
109#define CMD_TX_ACKNOWLEDGE 50 134#define CMD_TX_ACKNOWLEDGE 50
110#define CMD_CAN_ERROR_EVENT 51 135#define CMD_CAN_ERROR_EVENT 51
111#define CMD_USB_THROTTLE 77 136
112#define CMD_LOG_MESSAGE 106 137#define CMD_LEAF_USB_THROTTLE 77
138#define CMD_LEAF_LOG_MESSAGE 106
113 139
114/* error factors */ 140/* error factors */
115#define M16C_EF_ACKE BIT(0) 141#define M16C_EF_ACKE BIT(0)
@@ -121,6 +147,14 @@
121#define M16C_EF_RCVE BIT(6) 147#define M16C_EF_RCVE BIT(6)
122#define M16C_EF_TRE BIT(7) 148#define M16C_EF_TRE BIT(7)
123 149
150/* Only Leaf-based devices can report M16C error factors,
151 * thus define our own error status flags for USBCANII
152 */
153#define USBCAN_ERROR_STATE_NONE 0
154#define USBCAN_ERROR_STATE_TX_ERROR BIT(0)
155#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
156#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
157
124/* bittiming parameters */ 158/* bittiming parameters */
125#define KVASER_USB_TSEG1_MIN 1 159#define KVASER_USB_TSEG1_MIN 1
126#define KVASER_USB_TSEG1_MAX 16 160#define KVASER_USB_TSEG1_MAX 16
@@ -137,9 +171,18 @@
137#define KVASER_CTRL_MODE_SELFRECEPTION 3 171#define KVASER_CTRL_MODE_SELFRECEPTION 3
138#define KVASER_CTRL_MODE_OFF 4 172#define KVASER_CTRL_MODE_OFF 4
139 173
140/* log message */ 174/* Extended CAN identifier flag */
141#define KVASER_EXTENDED_FRAME BIT(31) 175#define KVASER_EXTENDED_FRAME BIT(31)
142 176
177/* Kvaser USB CAN dongles are divided into two major families:
178 * - Leaf: Based on Renesas M32C, running firmware labeled as 'filo'
179 * - UsbcanII: Based on Renesas M16C, running firmware labeled as 'helios'
180 */
181enum kvaser_usb_family {
182 KVASER_LEAF,
183 KVASER_USBCAN,
184};
185
143struct kvaser_msg_simple { 186struct kvaser_msg_simple {
144 u8 tid; 187 u8 tid;
145 u8 channel; 188 u8 channel;
@@ -148,30 +191,55 @@ struct kvaser_msg_simple {
148struct kvaser_msg_cardinfo { 191struct kvaser_msg_cardinfo {
149 u8 tid; 192 u8 tid;
150 u8 nchannels; 193 u8 nchannels;
151 __le32 serial_number; 194 union {
152 __le32 padding; 195 struct {
196 __le32 serial_number;
197 __le32 padding;
198 } __packed leaf0;
199 struct {
200 __le32 serial_number_low;
201 __le32 serial_number_high;
202 } __packed usbcan0;
203 } __packed;
153 __le32 clock_resolution; 204 __le32 clock_resolution;
154 __le32 mfgdate; 205 __le32 mfgdate;
155 u8 ean[8]; 206 u8 ean[8];
156 u8 hw_revision; 207 u8 hw_revision;
157 u8 usb_hs_mode; 208 union {
158 __le16 padding2; 209 struct {
210 u8 usb_hs_mode;
211 } __packed leaf1;
212 struct {
213 u8 padding;
214 } __packed usbcan1;
215 } __packed;
216 __le16 padding;
159} __packed; 217} __packed;
160 218
161struct kvaser_msg_cardinfo2 { 219struct kvaser_msg_cardinfo2 {
162 u8 tid; 220 u8 tid;
163 u8 channel; 221 u8 reserved;
164 u8 pcb_id[24]; 222 u8 pcb_id[24];
165 __le32 oem_unlock_code; 223 __le32 oem_unlock_code;
166} __packed; 224} __packed;
167 225
168struct kvaser_msg_softinfo { 226struct leaf_msg_softinfo {
169 u8 tid; 227 u8 tid;
170 u8 channel; 228 u8 padding0;
171 __le32 sw_options; 229 __le32 sw_options;
172 __le32 fw_version; 230 __le32 fw_version;
173 __le16 max_outstanding_tx; 231 __le16 max_outstanding_tx;
174 __le16 padding[9]; 232 __le16 padding1[9];
233} __packed;
234
235struct usbcan_msg_softinfo {
236 u8 tid;
237 u8 fw_name[5];
238 __le16 max_outstanding_tx;
239 u8 padding[6];
240 __le32 fw_version;
241 __le16 checksum;
242 __le16 sw_options;
175} __packed; 243} __packed;
176 244
177struct kvaser_msg_busparams { 245struct kvaser_msg_busparams {
@@ -188,36 +256,86 @@ struct kvaser_msg_tx_can {
188 u8 channel; 256 u8 channel;
189 u8 tid; 257 u8 tid;
190 u8 msg[14]; 258 u8 msg[14];
191 u8 padding; 259 union {
192 u8 flags; 260 struct {
261 u8 padding;
262 u8 flags;
263 } __packed leaf;
264 struct {
265 u8 flags;
266 u8 padding;
267 } __packed usbcan;
268 } __packed;
269} __packed;
270
271struct kvaser_msg_rx_can_header {
272 u8 channel;
273 u8 flag;
193} __packed; 274} __packed;
194 275
195struct kvaser_msg_rx_can { 276struct leaf_msg_rx_can {
196 u8 channel; 277 u8 channel;
197 u8 flag; 278 u8 flag;
279
198 __le16 time[3]; 280 __le16 time[3];
199 u8 msg[14]; 281 u8 msg[14];
200} __packed; 282} __packed;
201 283
202struct kvaser_msg_chip_state_event { 284struct usbcan_msg_rx_can {
285 u8 channel;
286 u8 flag;
287
288 u8 msg[14];
289 __le16 time;
290} __packed;
291
292struct leaf_msg_chip_state_event {
203 u8 tid; 293 u8 tid;
204 u8 channel; 294 u8 channel;
295
205 __le16 time[3]; 296 __le16 time[3];
206 u8 tx_errors_count; 297 u8 tx_errors_count;
207 u8 rx_errors_count; 298 u8 rx_errors_count;
299
208 u8 status; 300 u8 status;
209 u8 padding[3]; 301 u8 padding[3];
210} __packed; 302} __packed;
211 303
212struct kvaser_msg_tx_acknowledge { 304struct usbcan_msg_chip_state_event {
305 u8 tid;
306 u8 channel;
307
308 u8 tx_errors_count;
309 u8 rx_errors_count;
310 __le16 time;
311
312 u8 status;
313 u8 padding[3];
314} __packed;
315
316struct kvaser_msg_tx_acknowledge_header {
213 u8 channel; 317 u8 channel;
214 u8 tid; 318 u8 tid;
319} __packed;
320
321struct leaf_msg_tx_acknowledge {
322 u8 channel;
323 u8 tid;
324
215 __le16 time[3]; 325 __le16 time[3];
216 u8 flags; 326 u8 flags;
217 u8 time_offset; 327 u8 time_offset;
218} __packed; 328} __packed;
219 329
220struct kvaser_msg_error_event { 330struct usbcan_msg_tx_acknowledge {
331 u8 channel;
332 u8 tid;
333
334 __le16 time;
335 __le16 padding;
336} __packed;
337
338struct leaf_msg_error_event {
221 u8 tid; 339 u8 tid;
222 u8 flags; 340 u8 flags;
223 __le16 time[3]; 341 __le16 time[3];
@@ -229,6 +347,18 @@ struct kvaser_msg_error_event {
229 u8 error_factor; 347 u8 error_factor;
230} __packed; 348} __packed;
231 349
350struct usbcan_msg_error_event {
351 u8 tid;
352 u8 padding;
353 u8 tx_errors_count_ch0;
354 u8 rx_errors_count_ch0;
355 u8 tx_errors_count_ch1;
356 u8 rx_errors_count_ch1;
357 u8 status_ch0;
358 u8 status_ch1;
359 __le16 time;
360} __packed;
361
232struct kvaser_msg_ctrl_mode { 362struct kvaser_msg_ctrl_mode {
233 u8 tid; 363 u8 tid;
234 u8 channel; 364 u8 channel;
@@ -243,7 +373,7 @@ struct kvaser_msg_flush_queue {
243 u8 padding[3]; 373 u8 padding[3];
244} __packed; 374} __packed;
245 375
246struct kvaser_msg_log_message { 376struct leaf_msg_log_message {
247 u8 channel; 377 u8 channel;
248 u8 flags; 378 u8 flags;
249 __le16 time[3]; 379 __le16 time[3];
@@ -260,19 +390,57 @@ struct kvaser_msg {
260 struct kvaser_msg_simple simple; 390 struct kvaser_msg_simple simple;
261 struct kvaser_msg_cardinfo cardinfo; 391 struct kvaser_msg_cardinfo cardinfo;
262 struct kvaser_msg_cardinfo2 cardinfo2; 392 struct kvaser_msg_cardinfo2 cardinfo2;
263 struct kvaser_msg_softinfo softinfo;
264 struct kvaser_msg_busparams busparams; 393 struct kvaser_msg_busparams busparams;
394
395 struct kvaser_msg_rx_can_header rx_can_header;
396 struct kvaser_msg_tx_acknowledge_header tx_acknowledge_header;
397
398 union {
399 struct leaf_msg_softinfo softinfo;
400 struct leaf_msg_rx_can rx_can;
401 struct leaf_msg_chip_state_event chip_state_event;
402 struct leaf_msg_tx_acknowledge tx_acknowledge;
403 struct leaf_msg_error_event error_event;
404 struct leaf_msg_log_message log_message;
405 } __packed leaf;
406
407 union {
408 struct usbcan_msg_softinfo softinfo;
409 struct usbcan_msg_rx_can rx_can;
410 struct usbcan_msg_chip_state_event chip_state_event;
411 struct usbcan_msg_tx_acknowledge tx_acknowledge;
412 struct usbcan_msg_error_event error_event;
413 } __packed usbcan;
414
265 struct kvaser_msg_tx_can tx_can; 415 struct kvaser_msg_tx_can tx_can;
266 struct kvaser_msg_rx_can rx_can;
267 struct kvaser_msg_chip_state_event chip_state_event;
268 struct kvaser_msg_tx_acknowledge tx_acknowledge;
269 struct kvaser_msg_error_event error_event;
270 struct kvaser_msg_ctrl_mode ctrl_mode; 416 struct kvaser_msg_ctrl_mode ctrl_mode;
271 struct kvaser_msg_flush_queue flush_queue; 417 struct kvaser_msg_flush_queue flush_queue;
272 struct kvaser_msg_log_message log_message;
273 } u; 418 } u;
274} __packed; 419} __packed;
275 420
421/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
422 * handling. Some discrepancies between the two families exist:
423 *
424 * - USBCAN firmware does not report M16C "error factors"
425 * - USBCAN controllers has difficulties reporting if the raised error
426 * event is for ch0 or ch1. They leave such arbitration to the OS
427 * driver by letting it compare error counters with previous values
428 * and decide the error event's channel. Thus for USBCAN, the channel
429 * field is only advisory.
430 */
431struct kvaser_usb_error_summary {
432 u8 channel, status, txerr, rxerr;
433 union {
434 struct {
435 u8 error_factor;
436 } leaf;
437 struct {
438 u8 other_ch_status;
439 u8 error_state;
440 } usbcan;
441 };
442};
443
276struct kvaser_usb_tx_urb_context { 444struct kvaser_usb_tx_urb_context {
277 struct kvaser_usb_net_priv *priv; 445 struct kvaser_usb_net_priv *priv;
278 u32 echo_index; 446 u32 echo_index;
@@ -288,6 +456,7 @@ struct kvaser_usb {
288 456
289 u32 fw_version; 457 u32 fw_version;
290 unsigned int nchannels; 458 unsigned int nchannels;
459 enum kvaser_usb_family family;
291 460
292 bool rxinitdone; 461 bool rxinitdone;
293 void *rxbuf[MAX_RX_URBS]; 462 void *rxbuf[MAX_RX_URBS];
@@ -311,6 +480,7 @@ struct kvaser_usb_net_priv {
311}; 480};
312 481
313static const struct usb_device_id kvaser_usb_table[] = { 482static const struct usb_device_id kvaser_usb_table[] = {
483 /* Leaf family IDs */
314 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) }, 484 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
315 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) }, 485 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
316 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID), 486 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
@@ -360,6 +530,17 @@ static const struct usb_device_id kvaser_usb_table[] = {
360 .driver_info = KVASER_HAS_TXRX_ERRORS }, 530 .driver_info = KVASER_HAS_TXRX_ERRORS },
361 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) }, 531 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
362 { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) }, 532 { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
533
534 /* USBCANII family IDs */
535 { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
536 .driver_info = KVASER_HAS_TXRX_ERRORS },
537 { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
538 .driver_info = KVASER_HAS_TXRX_ERRORS },
539 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
540 .driver_info = KVASER_HAS_TXRX_ERRORS },
541 { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
542 .driver_info = KVASER_HAS_TXRX_ERRORS },
543
363 { } 544 { }
364}; 545};
365MODULE_DEVICE_TABLE(usb, kvaser_usb_table); 546MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -463,7 +644,14 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
463 if (err) 644 if (err)
464 return err; 645 return err;
465 646
466 dev->fw_version = le32_to_cpu(msg.u.softinfo.fw_version); 647 switch (dev->family) {
648 case KVASER_LEAF:
649 dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
650 break;
651 case KVASER_USBCAN:
652 dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
653 break;
654 }
467 655
468 return 0; 656 return 0;
469} 657}
@@ -482,7 +670,9 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
482 return err; 670 return err;
483 671
484 dev->nchannels = msg.u.cardinfo.nchannels; 672 dev->nchannels = msg.u.cardinfo.nchannels;
485 if (dev->nchannels > MAX_NET_DEVICES) 673 if ((dev->nchannels > MAX_NET_DEVICES) ||
674 (dev->family == KVASER_USBCAN &&
675 dev->nchannels > MAX_USBCAN_NET_DEVICES))
486 return -EINVAL; 676 return -EINVAL;
487 677
488 return 0; 678 return 0;
@@ -496,8 +686,10 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
496 struct kvaser_usb_net_priv *priv; 686 struct kvaser_usb_net_priv *priv;
497 struct sk_buff *skb; 687 struct sk_buff *skb;
498 struct can_frame *cf; 688 struct can_frame *cf;
499 u8 channel = msg->u.tx_acknowledge.channel; 689 u8 channel, tid;
500 u8 tid = msg->u.tx_acknowledge.tid; 690
691 channel = msg->u.tx_acknowledge_header.channel;
692 tid = msg->u.tx_acknowledge_header.tid;
501 693
502 if (channel >= dev->nchannels) { 694 if (channel >= dev->nchannels) {
503 dev_err(dev->udev->dev.parent, 695 dev_err(dev->udev->dev.parent,
@@ -615,158 +807,280 @@ static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
615 priv->tx_contexts[i].echo_index = MAX_TX_URBS; 807 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
616} 808}
617 809
618static void kvaser_usb_rx_error(const struct kvaser_usb *dev, 810static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
619 const struct kvaser_msg *msg) 811 const struct kvaser_usb_error_summary *es,
812 struct can_frame *cf)
620{ 813{
621 struct can_frame *cf; 814 struct kvaser_usb *dev = priv->dev;
622 struct sk_buff *skb; 815 struct net_device_stats *stats = &priv->netdev->stats;
623 struct net_device_stats *stats; 816 enum can_state cur_state, new_state, tx_state, rx_state;
624 struct kvaser_usb_net_priv *priv;
625 unsigned int new_state;
626 u8 channel, status, txerr, rxerr, error_factor;
627 817
628 switch (msg->id) { 818 netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status);
629 case CMD_CAN_ERROR_EVENT: 819
630 channel = msg->u.error_event.channel; 820 new_state = cur_state = priv->can.state;
631 status = msg->u.error_event.status; 821
632 txerr = msg->u.error_event.tx_errors_count; 822 if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET))
633 rxerr = msg->u.error_event.rx_errors_count; 823 new_state = CAN_STATE_BUS_OFF;
634 error_factor = msg->u.error_event.error_factor; 824 else if (es->status & M16C_STATE_BUS_PASSIVE)
635 break; 825 new_state = CAN_STATE_ERROR_PASSIVE;
636 case CMD_LOG_MESSAGE: 826 else if (es->status & M16C_STATE_BUS_ERROR) {
637 channel = msg->u.log_message.channel; 827 /* Guard against spurious error events after a busoff */
638 status = msg->u.log_message.data[0]; 828 if (cur_state < CAN_STATE_BUS_OFF) {
639 txerr = msg->u.log_message.data[2]; 829 if ((es->txerr >= 128) || (es->rxerr >= 128))
640 rxerr = msg->u.log_message.data[3]; 830 new_state = CAN_STATE_ERROR_PASSIVE;
641 error_factor = msg->u.log_message.data[1]; 831 else if ((es->txerr >= 96) || (es->rxerr >= 96))
832 new_state = CAN_STATE_ERROR_WARNING;
833 else if (cur_state > CAN_STATE_ERROR_ACTIVE)
834 new_state = CAN_STATE_ERROR_ACTIVE;
835 }
836 }
837
838 if (!es->status)
839 new_state = CAN_STATE_ERROR_ACTIVE;
840
841 if (new_state != cur_state) {
842 tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
843 rx_state = (es->txerr <= es->rxerr) ? new_state : 0;
844
845 can_change_state(priv->netdev, cf, tx_state, rx_state);
846 }
847
848 if (priv->can.restart_ms &&
849 (cur_state >= CAN_STATE_BUS_OFF) &&
850 (new_state < CAN_STATE_BUS_OFF)) {
851 priv->can.can_stats.restarts++;
852 }
853
854 switch (dev->family) {
855 case KVASER_LEAF:
856 if (es->leaf.error_factor) {
857 priv->can.can_stats.bus_error++;
858 stats->rx_errors++;
859 }
642 break; 860 break;
643 case CMD_CHIP_STATE_EVENT: 861 case KVASER_USBCAN:
644 channel = msg->u.chip_state_event.channel; 862 if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR)
645 status = msg->u.chip_state_event.status; 863 stats->tx_errors++;
646 txerr = msg->u.chip_state_event.tx_errors_count; 864 if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR)
647 rxerr = msg->u.chip_state_event.rx_errors_count; 865 stats->rx_errors++;
648 error_factor = 0; 866 if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) {
867 priv->can.can_stats.bus_error++;
868 }
649 break; 869 break;
650 default:
651 dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
652 msg->id);
653 return;
654 } 870 }
655 871
656 if (channel >= dev->nchannels) { 872 priv->bec.txerr = es->txerr;
873 priv->bec.rxerr = es->rxerr;
874}
875
876static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
877 const struct kvaser_usb_error_summary *es)
878{
879 struct can_frame *cf, tmp_cf = { .can_id = CAN_ERR_FLAG, .can_dlc = CAN_ERR_DLC };
880 struct sk_buff *skb;
881 struct net_device_stats *stats;
882 struct kvaser_usb_net_priv *priv;
883 enum can_state old_state, new_state;
884
885 if (es->channel >= dev->nchannels) {
657 dev_err(dev->udev->dev.parent, 886 dev_err(dev->udev->dev.parent,
658 "Invalid channel number (%d)\n", channel); 887 "Invalid channel number (%d)\n", es->channel);
659 return; 888 return;
660 } 889 }
661 890
662 priv = dev->nets[channel]; 891 priv = dev->nets[es->channel];
663 stats = &priv->netdev->stats; 892 stats = &priv->netdev->stats;
664 893
894 /* Update all of the can interface's state and error counters before
895 * trying any memory allocation that can actually fail with -ENOMEM.
896 *
897 * We send a temporary stack-allocated error can frame to
898 * can_change_state() for the very same reason.
899 *
900 * TODO: Split can_change_state() responsibility between updating the
901 * can interface's state and counters, and the setting up of can error
902 * frame ID and data to userspace. Remove stack allocation afterwards.
903 */
904 old_state = priv->can.state;
905 kvaser_usb_rx_error_update_can_state(priv, es, &tmp_cf);
906 new_state = priv->can.state;
907
665 skb = alloc_can_err_skb(priv->netdev, &cf); 908 skb = alloc_can_err_skb(priv->netdev, &cf);
666 if (!skb) { 909 if (!skb) {
667 stats->rx_dropped++; 910 stats->rx_dropped++;
668 return; 911 return;
669 } 912 }
913 memcpy(cf, &tmp_cf, sizeof(*cf));
914
915 if (new_state != old_state) {
916 if (es->status &
917 (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
918 if (!priv->can.restart_ms)
919 kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP);
920 netif_carrier_off(priv->netdev);
921 }
670 922
671 new_state = priv->can.state; 923 if (priv->can.restart_ms &&
672 924 (old_state >= CAN_STATE_BUS_OFF) &&
673 netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status); 925 (new_state < CAN_STATE_BUS_OFF)) {
926 cf->can_id |= CAN_ERR_RESTARTED;
927 netif_carrier_on(priv->netdev);
928 }
929 }
674 930
675 if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { 931 switch (dev->family) {
676 cf->can_id |= CAN_ERR_BUSOFF; 932 case KVASER_LEAF:
933 if (es->leaf.error_factor) {
934 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
935
936 if (es->leaf.error_factor & M16C_EF_ACKE)
937 cf->data[3] |= (CAN_ERR_PROT_LOC_ACK);
938 if (es->leaf.error_factor & M16C_EF_CRCE)
939 cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
940 CAN_ERR_PROT_LOC_CRC_DEL);
941 if (es->leaf.error_factor & M16C_EF_FORME)
942 cf->data[2] |= CAN_ERR_PROT_FORM;
943 if (es->leaf.error_factor & M16C_EF_STFE)
944 cf->data[2] |= CAN_ERR_PROT_STUFF;
945 if (es->leaf.error_factor & M16C_EF_BITE0)
946 cf->data[2] |= CAN_ERR_PROT_BIT0;
947 if (es->leaf.error_factor & M16C_EF_BITE1)
948 cf->data[2] |= CAN_ERR_PROT_BIT1;
949 if (es->leaf.error_factor & M16C_EF_TRE)
950 cf->data[2] |= CAN_ERR_PROT_TX;
951 }
952 break;
953 case KVASER_USBCAN:
954 if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) {
955 cf->can_id |= CAN_ERR_BUSERROR;
956 }
957 break;
958 }
677 959
678 priv->can.can_stats.bus_off++; 960 cf->data[6] = es->txerr;
679 if (!priv->can.restart_ms) 961 cf->data[7] = es->rxerr;
680 kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP);
681 962
682 netif_carrier_off(priv->netdev); 963 stats->rx_packets++;
964 stats->rx_bytes += cf->can_dlc;
965 netif_rx(skb);
966}
683 967
684 new_state = CAN_STATE_BUS_OFF; 968/* For USBCAN, report error to userspace iff the channels's errors counter
685 } else if (status & M16C_STATE_BUS_PASSIVE) { 969 * has changed, or we're the only channel seeing a bus error state.
686 if (priv->can.state != CAN_STATE_ERROR_PASSIVE) { 970 */
687 cf->can_id |= CAN_ERR_CRTL; 971static void kvaser_usbcan_conditionally_rx_error(const struct kvaser_usb *dev,
688 972 struct kvaser_usb_error_summary *es)
689 if (txerr || rxerr) 973{
690 cf->data[1] = (txerr > rxerr) 974 struct kvaser_usb_net_priv *priv;
691 ? CAN_ERR_CRTL_TX_PASSIVE 975 int channel;
692 : CAN_ERR_CRTL_RX_PASSIVE; 976 bool report_error;
693 else
694 cf->data[1] = CAN_ERR_CRTL_TX_PASSIVE |
695 CAN_ERR_CRTL_RX_PASSIVE;
696
697 priv->can.can_stats.error_passive++;
698 }
699 977
700 new_state = CAN_STATE_ERROR_PASSIVE; 978 channel = es->channel;
701 } else if (status & M16C_STATE_BUS_ERROR) { 979 if (channel >= dev->nchannels) {
702 if ((priv->can.state < CAN_STATE_ERROR_WARNING) && 980 dev_err(dev->udev->dev.parent,
703 ((txerr >= 96) || (rxerr >= 96))) { 981 "Invalid channel number (%d)\n", channel);
704 cf->can_id |= CAN_ERR_CRTL; 982 return;
705 cf->data[1] = (txerr > rxerr)
706 ? CAN_ERR_CRTL_TX_WARNING
707 : CAN_ERR_CRTL_RX_WARNING;
708
709 priv->can.can_stats.error_warning++;
710 new_state = CAN_STATE_ERROR_WARNING;
711 } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
712 ((txerr < 96) && (rxerr < 96))) {
713 cf->can_id |= CAN_ERR_PROT;
714 cf->data[2] = CAN_ERR_PROT_ACTIVE;
715
716 new_state = CAN_STATE_ERROR_ACTIVE;
717 }
718 } 983 }
719 984
720 if (!status) { 985 priv = dev->nets[channel];
721 cf->can_id |= CAN_ERR_PROT; 986 report_error = false;
722 cf->data[2] = CAN_ERR_PROT_ACTIVE;
723 987
724 new_state = CAN_STATE_ERROR_ACTIVE; 988 if (es->txerr != priv->bec.txerr) {
989 es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR;
990 report_error = true;
991 }
992 if (es->rxerr != priv->bec.rxerr) {
993 es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR;
994 report_error = true;
995 }
996 if ((es->status & M16C_STATE_BUS_ERROR) &&
997 !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) {
998 es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR;
999 report_error = true;
725 } 1000 }
726 1001
727 if (priv->can.restart_ms && 1002 if (report_error)
728 (priv->can.state >= CAN_STATE_BUS_OFF) && 1003 kvaser_usb_rx_error(dev, es);
729 (new_state < CAN_STATE_BUS_OFF)) { 1004}
730 cf->can_id |= CAN_ERR_RESTARTED;
731 netif_carrier_on(priv->netdev);
732 1005
733 priv->can.can_stats.restarts++; 1006static void kvaser_usbcan_rx_error(const struct kvaser_usb *dev,
734 } 1007 const struct kvaser_msg *msg)
1008{
1009 struct kvaser_usb_error_summary es = { };
735 1010
736 if (error_factor) { 1011 switch (msg->id) {
737 priv->can.can_stats.bus_error++; 1012 /* Sometimes errors are sent as unsolicited chip state events */
738 stats->rx_errors++; 1013 case CMD_CHIP_STATE_EVENT:
1014 es.channel = msg->u.usbcan.chip_state_event.channel;
1015 es.status = msg->u.usbcan.chip_state_event.status;
1016 es.txerr = msg->u.usbcan.chip_state_event.tx_errors_count;
1017 es.rxerr = msg->u.usbcan.chip_state_event.rx_errors_count;
1018 kvaser_usbcan_conditionally_rx_error(dev, &es);
1019 break;
739 1020
740 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; 1021 case CMD_CAN_ERROR_EVENT:
741 1022 es.channel = 0;
742 if (error_factor & M16C_EF_ACKE) 1023 es.status = msg->u.usbcan.error_event.status_ch0;
743 cf->data[3] |= (CAN_ERR_PROT_LOC_ACK); 1024 es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch0;
744 if (error_factor & M16C_EF_CRCE) 1025 es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch0;
745 cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | 1026 es.usbcan.other_ch_status =
746 CAN_ERR_PROT_LOC_CRC_DEL); 1027 msg->u.usbcan.error_event.status_ch1;
747 if (error_factor & M16C_EF_FORME) 1028 kvaser_usbcan_conditionally_rx_error(dev, &es);
748 cf->data[2] |= CAN_ERR_PROT_FORM; 1029
749 if (error_factor & M16C_EF_STFE) 1030 /* The USBCAN firmware supports up to 2 channels.
750 cf->data[2] |= CAN_ERR_PROT_STUFF; 1031 * Now that ch0 was checked, check if ch1 has any errors.
751 if (error_factor & M16C_EF_BITE0) 1032 */
752 cf->data[2] |= CAN_ERR_PROT_BIT0; 1033 if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
753 if (error_factor & M16C_EF_BITE1) 1034 es.channel = 1;
754 cf->data[2] |= CAN_ERR_PROT_BIT1; 1035 es.status = msg->u.usbcan.error_event.status_ch1;
755 if (error_factor & M16C_EF_TRE) 1036 es.txerr = msg->u.usbcan.error_event.tx_errors_count_ch1;
756 cf->data[2] |= CAN_ERR_PROT_TX; 1037 es.rxerr = msg->u.usbcan.error_event.rx_errors_count_ch1;
757 } 1038 es.usbcan.other_ch_status =
1039 msg->u.usbcan.error_event.status_ch0;
1040 kvaser_usbcan_conditionally_rx_error(dev, &es);
1041 }
1042 break;
758 1043
759 cf->data[6] = txerr; 1044 default:
760 cf->data[7] = rxerr; 1045 dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
1046 msg->id);
1047 }
1048}
761 1049
762 priv->bec.txerr = txerr; 1050static void kvaser_leaf_rx_error(const struct kvaser_usb *dev,
763 priv->bec.rxerr = rxerr; 1051 const struct kvaser_msg *msg)
1052{
1053 struct kvaser_usb_error_summary es = { };
764 1054
765 priv->can.state = new_state; 1055 switch (msg->id) {
1056 case CMD_CAN_ERROR_EVENT:
1057 es.channel = msg->u.leaf.error_event.channel;
1058 es.status = msg->u.leaf.error_event.status;
1059 es.txerr = msg->u.leaf.error_event.tx_errors_count;
1060 es.rxerr = msg->u.leaf.error_event.rx_errors_count;
1061 es.leaf.error_factor = msg->u.leaf.error_event.error_factor;
1062 break;
1063 case CMD_LEAF_LOG_MESSAGE:
1064 es.channel = msg->u.leaf.log_message.channel;
1065 es.status = msg->u.leaf.log_message.data[0];
1066 es.txerr = msg->u.leaf.log_message.data[2];
1067 es.rxerr = msg->u.leaf.log_message.data[3];
1068 es.leaf.error_factor = msg->u.leaf.log_message.data[1];
1069 break;
1070 case CMD_CHIP_STATE_EVENT:
1071 es.channel = msg->u.leaf.chip_state_event.channel;
1072 es.status = msg->u.leaf.chip_state_event.status;
1073 es.txerr = msg->u.leaf.chip_state_event.tx_errors_count;
1074 es.rxerr = msg->u.leaf.chip_state_event.rx_errors_count;
1075 es.leaf.error_factor = 0;
1076 break;
1077 default:
1078 dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
1079 msg->id);
1080 return;
1081 }
766 1082
767 stats->rx_packets++; 1083 kvaser_usb_rx_error(dev, &es);
768 stats->rx_bytes += cf->can_dlc;
769 netif_rx(skb);
770} 1084}
771 1085
772static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, 1086static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
@@ -776,16 +1090,19 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
776 struct sk_buff *skb; 1090 struct sk_buff *skb;
777 struct net_device_stats *stats = &priv->netdev->stats; 1091 struct net_device_stats *stats = &priv->netdev->stats;
778 1092
779 if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | 1093 if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
780 MSG_FLAG_NERR)) { 1094 MSG_FLAG_NERR)) {
781 netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n", 1095 netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n",
782 msg->u.rx_can.flag); 1096 msg->u.rx_can_header.flag);
783 1097
784 stats->rx_errors++; 1098 stats->rx_errors++;
785 return; 1099 return;
786 } 1100 }
787 1101
788 if (msg->u.rx_can.flag & MSG_FLAG_OVERRUN) { 1102 if (msg->u.rx_can_header.flag & MSG_FLAG_OVERRUN) {
1103 stats->rx_over_errors++;
1104 stats->rx_errors++;
1105
789 skb = alloc_can_err_skb(priv->netdev, &cf); 1106 skb = alloc_can_err_skb(priv->netdev, &cf);
790 if (!skb) { 1107 if (!skb) {
791 stats->rx_dropped++; 1108 stats->rx_dropped++;
@@ -795,9 +1112,6 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
795 cf->can_id |= CAN_ERR_CRTL; 1112 cf->can_id |= CAN_ERR_CRTL;
796 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 1113 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
797 1114
798 stats->rx_over_errors++;
799 stats->rx_errors++;
800
801 stats->rx_packets++; 1115 stats->rx_packets++;
802 stats->rx_bytes += cf->can_dlc; 1116 stats->rx_bytes += cf->can_dlc;
803 netif_rx(skb); 1117 netif_rx(skb);
@@ -811,7 +1125,8 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
811 struct can_frame *cf; 1125 struct can_frame *cf;
812 struct sk_buff *skb; 1126 struct sk_buff *skb;
813 struct net_device_stats *stats; 1127 struct net_device_stats *stats;
814 u8 channel = msg->u.rx_can.channel; 1128 u8 channel = msg->u.rx_can_header.channel;
1129 const u8 *rx_msg = NULL; /* GCC */
815 1130
816 if (channel >= dev->nchannels) { 1131 if (channel >= dev->nchannels) {
817 dev_err(dev->udev->dev.parent, 1132 dev_err(dev->udev->dev.parent,
@@ -822,60 +1137,68 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
822 priv = dev->nets[channel]; 1137 priv = dev->nets[channel];
823 stats = &priv->netdev->stats; 1138 stats = &priv->netdev->stats;
824 1139
825 if ((msg->u.rx_can.flag & MSG_FLAG_ERROR_FRAME) && 1140 if ((msg->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
826 (msg->id == CMD_LOG_MESSAGE)) { 1141 (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE)) {
827 kvaser_usb_rx_error(dev, msg); 1142 kvaser_leaf_rx_error(dev, msg);
828 return; 1143 return;
829 } else if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | 1144 } else if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
830 MSG_FLAG_NERR | 1145 MSG_FLAG_NERR |
831 MSG_FLAG_OVERRUN)) { 1146 MSG_FLAG_OVERRUN)) {
832 kvaser_usb_rx_can_err(priv, msg); 1147 kvaser_usb_rx_can_err(priv, msg);
833 return; 1148 return;
834 } else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) { 1149 } else if (msg->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) {
835 netdev_warn(priv->netdev, 1150 netdev_warn(priv->netdev,
836 "Unhandled frame (flags: 0x%02x)", 1151 "Unhandled frame (flags: 0x%02x)",
837 msg->u.rx_can.flag); 1152 msg->u.rx_can_header.flag);
838 return; 1153 return;
839 } 1154 }
840 1155
1156 switch (dev->family) {
1157 case KVASER_LEAF:
1158 rx_msg = msg->u.leaf.rx_can.msg;
1159 break;
1160 case KVASER_USBCAN:
1161 rx_msg = msg->u.usbcan.rx_can.msg;
1162 break;
1163 }
1164
841 skb = alloc_can_skb(priv->netdev, &cf); 1165 skb = alloc_can_skb(priv->netdev, &cf);
842 if (!skb) { 1166 if (!skb) {
843 stats->tx_dropped++; 1167 stats->tx_dropped++;
844 return; 1168 return;
845 } 1169 }
846 1170
847 if (msg->id == CMD_LOG_MESSAGE) { 1171 if (dev->family == KVASER_LEAF && msg->id == CMD_LEAF_LOG_MESSAGE) {
848 cf->can_id = le32_to_cpu(msg->u.log_message.id); 1172 cf->can_id = le32_to_cpu(msg->u.leaf.log_message.id);
849 if (cf->can_id & KVASER_EXTENDED_FRAME) 1173 if (cf->can_id & KVASER_EXTENDED_FRAME)
850 cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; 1174 cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
851 else 1175 else
852 cf->can_id &= CAN_SFF_MASK; 1176 cf->can_id &= CAN_SFF_MASK;
853 1177
854 cf->can_dlc = get_can_dlc(msg->u.log_message.dlc); 1178 cf->can_dlc = get_can_dlc(msg->u.leaf.log_message.dlc);
855 1179
856 if (msg->u.log_message.flags & MSG_FLAG_REMOTE_FRAME) 1180 if (msg->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
857 cf->can_id |= CAN_RTR_FLAG; 1181 cf->can_id |= CAN_RTR_FLAG;
858 else 1182 else
859 memcpy(cf->data, &msg->u.log_message.data, 1183 memcpy(cf->data, &msg->u.leaf.log_message.data,
860 cf->can_dlc); 1184 cf->can_dlc);
861 } else { 1185 } else {
862 cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) | 1186 cf->can_id = ((rx_msg[0] & 0x1f) << 6) | (rx_msg[1] & 0x3f);
863 (msg->u.rx_can.msg[1] & 0x3f);
864 1187
865 if (msg->id == CMD_RX_EXT_MESSAGE) { 1188 if (msg->id == CMD_RX_EXT_MESSAGE) {
866 cf->can_id <<= 18; 1189 cf->can_id <<= 18;
867 cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) | 1190 cf->can_id |= ((rx_msg[2] & 0x0f) << 14) |
868 ((msg->u.rx_can.msg[3] & 0xff) << 6) | 1191 ((rx_msg[3] & 0xff) << 6) |
869 (msg->u.rx_can.msg[4] & 0x3f); 1192 (rx_msg[4] & 0x3f);
870 cf->can_id |= CAN_EFF_FLAG; 1193 cf->can_id |= CAN_EFF_FLAG;
871 } 1194 }
872 1195
873 cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]); 1196 cf->can_dlc = get_can_dlc(rx_msg[5]);
874 1197
875 if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME) 1198 if (msg->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
876 cf->can_id |= CAN_RTR_FLAG; 1199 cf->can_id |= CAN_RTR_FLAG;
877 else 1200 else
878 memcpy(cf->data, &msg->u.rx_can.msg[6], 1201 memcpy(cf->data, &rx_msg[6],
879 cf->can_dlc); 1202 cf->can_dlc);
880 } 1203 }
881 1204
@@ -938,21 +1261,35 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
938 1261
939 case CMD_RX_STD_MESSAGE: 1262 case CMD_RX_STD_MESSAGE:
940 case CMD_RX_EXT_MESSAGE: 1263 case CMD_RX_EXT_MESSAGE:
941 case CMD_LOG_MESSAGE: 1264 kvaser_usb_rx_can_msg(dev, msg);
1265 break;
1266
1267 case CMD_LEAF_LOG_MESSAGE:
1268 if (dev->family != KVASER_LEAF)
1269 goto warn;
942 kvaser_usb_rx_can_msg(dev, msg); 1270 kvaser_usb_rx_can_msg(dev, msg);
943 break; 1271 break;
944 1272
945 case CMD_CHIP_STATE_EVENT: 1273 case CMD_CHIP_STATE_EVENT:
946 case CMD_CAN_ERROR_EVENT: 1274 case CMD_CAN_ERROR_EVENT:
947 kvaser_usb_rx_error(dev, msg); 1275 if (dev->family == KVASER_LEAF)
1276 kvaser_leaf_rx_error(dev, msg);
1277 else
1278 kvaser_usbcan_rx_error(dev, msg);
948 break; 1279 break;
949 1280
950 case CMD_TX_ACKNOWLEDGE: 1281 case CMD_TX_ACKNOWLEDGE:
951 kvaser_usb_tx_acknowledge(dev, msg); 1282 kvaser_usb_tx_acknowledge(dev, msg);
952 break; 1283 break;
953 1284
1285 /* Ignored messages */
1286 case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
1287 if (dev->family != KVASER_USBCAN)
1288 goto warn;
1289 break;
1290
954 default: 1291 default:
955 dev_warn(dev->udev->dev.parent, 1292warn: dev_warn(dev->udev->dev.parent,
956 "Unhandled message (%d)\n", msg->id); 1293 "Unhandled message (%d)\n", msg->id);
957 break; 1294 break;
958 } 1295 }
@@ -1172,7 +1509,7 @@ static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
1172 dev->rxbuf[i], 1509 dev->rxbuf[i],
1173 dev->rxbuf_dma[i]); 1510 dev->rxbuf_dma[i]);
1174 1511
1175 for (i = 0; i < MAX_NET_DEVICES; i++) { 1512 for (i = 0; i < dev->nchannels; i++) {
1176 struct kvaser_usb_net_priv *priv = dev->nets[i]; 1513 struct kvaser_usb_net_priv *priv = dev->nets[i];
1177 1514
1178 if (priv) 1515 if (priv)
@@ -1280,6 +1617,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1280 struct kvaser_msg *msg; 1617 struct kvaser_msg *msg;
1281 int i, err; 1618 int i, err;
1282 int ret = NETDEV_TX_OK; 1619 int ret = NETDEV_TX_OK;
1620 u8 *msg_tx_can_flags = NULL; /* GCC */
1283 1621
1284 if (can_dropped_invalid_skb(netdev, skb)) 1622 if (can_dropped_invalid_skb(netdev, skb))
1285 return NETDEV_TX_OK; 1623 return NETDEV_TX_OK;
@@ -1301,9 +1639,19 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1301 1639
1302 msg = buf; 1640 msg = buf;
1303 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can); 1641 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can);
1304 msg->u.tx_can.flags = 0;
1305 msg->u.tx_can.channel = priv->channel; 1642 msg->u.tx_can.channel = priv->channel;
1306 1643
1644 switch (dev->family) {
1645 case KVASER_LEAF:
1646 msg_tx_can_flags = &msg->u.tx_can.leaf.flags;
1647 break;
1648 case KVASER_USBCAN:
1649 msg_tx_can_flags = &msg->u.tx_can.usbcan.flags;
1650 break;
1651 }
1652
1653 *msg_tx_can_flags = 0;
1654
1307 if (cf->can_id & CAN_EFF_FLAG) { 1655 if (cf->can_id & CAN_EFF_FLAG) {
1308 msg->id = CMD_TX_EXT_MESSAGE; 1656 msg->id = CMD_TX_EXT_MESSAGE;
1309 msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f; 1657 msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f;
@@ -1321,7 +1669,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1321 memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc); 1669 memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc);
1322 1670
1323 if (cf->can_id & CAN_RTR_FLAG) 1671 if (cf->can_id & CAN_RTR_FLAG)
1324 msg->u.tx_can.flags |= MSG_FLAG_REMOTE_FRAME; 1672 *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
1325 1673
1326 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { 1674 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
1327 if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { 1675 if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
@@ -1590,6 +1938,17 @@ static int kvaser_usb_probe(struct usb_interface *intf,
1590 if (!dev) 1938 if (!dev)
1591 return -ENOMEM; 1939 return -ENOMEM;
1592 1940
1941 if (kvaser_is_leaf(id)) {
1942 dev->family = KVASER_LEAF;
1943 } else if (kvaser_is_usbcan(id)) {
1944 dev->family = KVASER_USBCAN;
1945 } else {
1946 dev_err(&intf->dev,
1947 "Product ID (%d) does not belong to any known Kvaser USB family",
1948 id->idProduct);
1949 return -ENODEV;
1950 }
1951
1593 err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out); 1952 err = kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
1594 if (err) { 1953 if (err) {
1595 dev_err(&intf->dev, "Cannot get usb endpoint(s)"); 1954 dev_err(&intf->dev, "Cannot get usb endpoint(s)");
diff --git a/drivers/net/can/usb/peak_usb/Makefile b/drivers/net/can/usb/peak_usb/Makefile
index 1aefbc88d643..1839e9ca62e7 100644
--- a/drivers/net/can/usb/peak_usb/Makefile
+++ b/drivers/net/can/usb/peak_usb/Makefile
@@ -1,2 +1,2 @@
1obj-$(CONFIG_CAN_PEAK_USB) += peak_usb.o 1obj-$(CONFIG_CAN_PEAK_USB) += peak_usb.o
2peak_usb-y = pcan_usb_core.o pcan_usb.o pcan_usb_pro.o 2peak_usb-y = pcan_usb_core.o pcan_usb.o pcan_usb_pro.o pcan_usb_fd.o
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h
new file mode 100644
index 000000000000..1ba7c25002e1
--- /dev/null
+++ b/drivers/net/can/usb/peak_usb/pcan_ucan.h
@@ -0,0 +1,222 @@
1/*
2 * CAN driver for PEAK System micro-CAN based adapters
3 *
4 * Copyright (C) 2003-2011 PEAK System-Technik GmbH
5 * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published
9 * by the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 */
16#ifndef PUCAN_H
17#define PUCAN_H
18
19/* uCAN commands opcodes list (low-order 10 bits) */
20#define PUCAN_CMD_NOP 0x000
21#define PUCAN_CMD_RESET_MODE 0x001
22#define PUCAN_CMD_NORMAL_MODE 0x002
23#define PUCAN_CMD_LISTEN_ONLY_MODE 0x003
24#define PUCAN_CMD_TIMING_SLOW 0x004
25#define PUCAN_CMD_TIMING_FAST 0x005
26#define PUCAN_CMD_FILTER_STD 0x008
27#define PUCAN_CMD_TX_ABORT 0x009
28#define PUCAN_CMD_WR_ERR_CNT 0x00a
29#define PUCAN_CMD_RX_FRAME_ENABLE 0x00b
30#define PUCAN_CMD_RX_FRAME_DISABLE 0x00c
31#define PUCAN_CMD_END_OF_COLLECTION 0x3ff
32
33/* uCAN received messages list */
34#define PUCAN_MSG_CAN_RX 0x0001
35#define PUCAN_MSG_ERROR 0x0002
36#define PUCAN_MSG_STATUS 0x0003
37#define PUCAN_MSG_BUSLOAD 0x0004
38#define PUCAN_MSG_CAN_TX 0x1000
39
40/* uCAN command common header */
41struct __packed pucan_command {
42 __le16 opcode_channel;
43 u16 args[3];
44};
45
46/* uCAN TIMING_SLOW command fields */
47#define PUCAN_TSLOW_SJW_T(s, t) (((s) & 0xf) | ((!!(t)) << 7))
48#define PUCAN_TSLOW_TSEG2(t) ((t) & 0xf)
49#define PUCAN_TSLOW_TSEG1(t) ((t) & 0x3f)
50#define PUCAN_TSLOW_BRP(b) ((b) & 0x3ff)
51
52struct __packed pucan_timing_slow {
53 __le16 opcode_channel;
54
55 u8 ewl; /* Error Warning limit */
56 u8 sjw_t; /* Sync Jump Width + Triple sampling */
57 u8 tseg2; /* Timing SEGment 2 */
58 u8 tseg1; /* Timing SEGment 1 */
59
60 __le16 brp; /* BaudRate Prescaler */
61};
62
63/* uCAN TIMING_FAST command fields */
64#define PUCAN_TFAST_SJW(s) ((s) & 0x3)
65#define PUCAN_TFAST_TSEG2(t) ((t) & 0x7)
66#define PUCAN_TFAST_TSEG1(t) ((t) & 0xf)
67#define PUCAN_TFAST_BRP(b) ((b) & 0x3ff)
68
69struct __packed pucan_timing_fast {
70 __le16 opcode_channel;
71
72 u8 unused;
73 u8 sjw; /* Sync Jump Width */
74 u8 tseg2; /* Timing SEGment 2 */
75 u8 tseg1; /* Timing SEGment 1 */
76
77 __le16 brp; /* BaudRate Prescaler */
78};
79
80/* uCAN FILTER_STD command fields */
81#define PUCAN_FLTSTD_ROW_IDX_BITS 6
82
83struct __packed pucan_filter_std {
84 __le16 opcode_channel;
85
86 __le16 idx;
87 __le32 mask; /* CAN-ID bitmask in idx range */
88};
89
90/* uCAN WR_ERR_CNT command fields */
91#define PUCAN_WRERRCNT_TE 0x4000 /* Tx error cntr write Enable */
92#define PUCAN_WRERRCNT_RE 0x8000 /* Rx error cntr write Enable */
93
94struct __packed pucan_wr_err_cnt {
95 __le16 opcode_channel;
96
97 __le16 sel_mask;
98 u8 tx_counter; /* Tx error counter new value */
99 u8 rx_counter; /* Rx error counter new value */
100
101 u16 unused;
102};
103
104/* uCAN RX_FRAME_ENABLE command fields */
105#define PUCAN_FLTEXT_ERROR 0x0001
106#define PUCAN_FLTEXT_BUSLOAD 0x0002
107
108struct __packed pucan_filter_ext {
109 __le16 opcode_channel;
110
111 __le16 ext_mask;
112 u32 unused;
113};
114
115/* uCAN received messages global format */
116struct __packed pucan_msg {
117 __le16 size;
118 __le16 type;
119 __le32 ts_low;
120 __le32 ts_high;
121};
122
123/* uCAN flags for CAN/CANFD messages */
124#define PUCAN_MSG_SELF_RECEIVE 0x80
125#define PUCAN_MSG_ERROR_STATE_IND 0x40 /* error state indicator */
126#define PUCAN_MSG_BITRATE_SWITCH 0x20 /* bitrate switch */
127#define PUCAN_MSG_EXT_DATA_LEN 0x10 /* extended data length */
128#define PUCAN_MSG_SINGLE_SHOT 0x08
129#define PUCAN_MSG_LOOPED_BACK 0x04
130#define PUCAN_MSG_EXT_ID 0x02
131#define PUCAN_MSG_RTR 0x01
132
133struct __packed pucan_rx_msg {
134 __le16 size;
135 __le16 type;
136 __le32 ts_low;
137 __le32 ts_high;
138 __le32 tag_low;
139 __le32 tag_high;
140 u8 channel_dlc;
141 u8 client;
142 __le16 flags;
143 __le32 can_id;
144 u8 d[0];
145};
146
147/* uCAN error types */
148#define PUCAN_ERMSG_BIT_ERROR 0
149#define PUCAN_ERMSG_FORM_ERROR 1
150#define PUCAN_ERMSG_STUFF_ERROR 2
151#define PUCAN_ERMSG_OTHER_ERROR 3
152#define PUCAN_ERMSG_ERR_CNT_DEC 4
153
154struct __packed pucan_error_msg {
155 __le16 size;
156 __le16 type;
157 __le32 ts_low;
158 __le32 ts_high;
159 u8 channel_type_d;
160 u8 code_g;
161 u8 tx_err_cnt;
162 u8 rx_err_cnt;
163};
164
165#define PUCAN_BUS_PASSIVE 0x20
166#define PUCAN_BUS_WARNING 0x40
167#define PUCAN_BUS_BUSOFF 0x80
168
169struct __packed pucan_status_msg {
170 __le16 size;
171 __le16 type;
172 __le32 ts_low;
173 __le32 ts_high;
174 u8 channel_p_w_b;
175 u8 unused[3];
176};
177
178/* uCAN transmitted message format */
179#define PUCAN_MSG_CHANNEL_DLC(c, d) (((c) & 0xf) | ((d) << 4))
180
181struct __packed pucan_tx_msg {
182 __le16 size;
183 __le16 type;
184 __le32 tag_low;
185 __le32 tag_high;
186 u8 channel_dlc;
187 u8 client;
188 __le16 flags;
189 __le32 can_id;
190 u8 d[0];
191};
192
193/* build the cmd opcode_channel field with respect to the correct endianness */
194static inline __le16 pucan_cmd_opcode_channel(struct peak_usb_device *dev,
195 int opcode)
196{
197 return cpu_to_le16(((dev->ctrl_idx) << 12) | ((opcode) & 0x3ff));
198}
199
200/* return the channel number part from any received message channel_dlc field */
201static inline int pucan_msg_get_channel(struct pucan_rx_msg *rm)
202{
203 return rm->channel_dlc & 0xf;
204}
205
206/* return the dlc value from any received message channel_dlc field */
207static inline int pucan_msg_get_dlc(struct pucan_rx_msg *rm)
208{
209 return rm->channel_dlc >> 4;
210}
211
212static inline int pucan_ermsg_get_channel(struct pucan_error_msg *em)
213{
214 return em->channel_type_d & 0x0f;
215}
216
217static inline int pucan_stmsg_get_channel(struct pucan_status_msg *sm)
218{
219 return sm->channel_p_w_b & 0x0f;
220}
221
222#endif
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 4e1659d07979..72427f21edff 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -488,6 +488,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
488 switch (new_state) { 488 switch (new_state) {
489 case CAN_STATE_BUS_OFF: 489 case CAN_STATE_BUS_OFF:
490 cf->can_id |= CAN_ERR_BUSOFF; 490 cf->can_id |= CAN_ERR_BUSOFF;
491 mc->pdev->dev.can.can_stats.bus_off++;
491 can_bus_off(mc->netdev); 492 can_bus_off(mc->netdev);
492 break; 493 break;
493 494
@@ -854,10 +855,11 @@ static int pcan_usb_probe(struct usb_interface *intf)
854/* 855/*
855 * describe the PCAN-USB adapter 856 * describe the PCAN-USB adapter
856 */ 857 */
857struct peak_usb_adapter pcan_usb = { 858const struct peak_usb_adapter pcan_usb = {
858 .name = "PCAN-USB", 859 .name = "PCAN-USB",
859 .device_id = PCAN_USB_PRODUCT_ID, 860 .device_id = PCAN_USB_PRODUCT_ID,
860 .ctrl_count = 1, 861 .ctrl_count = 1,
862 .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
861 .clock = { 863 .clock = {
862 .freq = PCAN_USB_CRYSTAL_HZ / 2 , 864 .freq = PCAN_USB_CRYSTAL_HZ / 2 ,
863 }, 865 },
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index c62f48a1161d..7921cff93a63 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -37,16 +37,19 @@ MODULE_LICENSE("GPL v2");
37static struct usb_device_id peak_usb_table[] = { 37static struct usb_device_id peak_usb_table[] = {
38 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID)}, 38 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID)},
39 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)}, 39 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)},
40 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)},
41 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)},
40 {} /* Terminating entry */ 42 {} /* Terminating entry */
41}; 43};
42 44
43MODULE_DEVICE_TABLE(usb, peak_usb_table); 45MODULE_DEVICE_TABLE(usb, peak_usb_table);
44 46
45/* List of supported PCAN-USB adapters (NULL terminated list) */ 47/* List of supported PCAN-USB adapters (NULL terminated list) */
46static struct peak_usb_adapter *peak_usb_adapters_list[] = { 48static const struct peak_usb_adapter *const peak_usb_adapters_list[] = {
47 &pcan_usb, 49 &pcan_usb,
48 &pcan_usb_pro, 50 &pcan_usb_pro,
49 NULL, 51 &pcan_usb_fd,
52 &pcan_usb_pro_fd,
50}; 53};
51 54
52/* 55/*
@@ -65,7 +68,7 @@ void pcan_dump_mem(char *prompt, void *p, int l)
65 * initialize a time_ref object with usb adapter own settings 68 * initialize a time_ref object with usb adapter own settings
66 */ 69 */
67void peak_usb_init_time_ref(struct peak_time_ref *time_ref, 70void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
68 struct peak_usb_adapter *adapter) 71 const struct peak_usb_adapter *adapter)
69{ 72{
70 if (time_ref) { 73 if (time_ref) {
71 memset(time_ref, 0, sizeof(struct peak_time_ref)); 74 memset(time_ref, 0, sizeof(struct peak_time_ref));
@@ -165,6 +168,21 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
165} 168}
166 169
167/* 170/*
171 * post received skb after having set any hw timestamp
172 */
173int peak_usb_netif_rx(struct sk_buff *skb,
174 struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high)
175{
176 struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
177 struct timeval tv;
178
179 peak_usb_get_ts_tv(time_ref, ts_low, &tv);
180 hwts->hwtstamp = timeval_to_ktime(tv);
181
182 return netif_rx(skb);
183}
184
185/*
168 * callback for bulk Rx urb 186 * callback for bulk Rx urb
169 */ 187 */
170static void peak_usb_read_bulk_callback(struct urb *urb) 188static void peak_usb_read_bulk_callback(struct urb *urb)
@@ -253,7 +271,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
253 case 0: 271 case 0:
254 /* transmission complete */ 272 /* transmission complete */
255 netdev->stats.tx_packets++; 273 netdev->stats.tx_packets++;
256 netdev->stats.tx_bytes += context->dlc; 274 netdev->stats.tx_bytes += context->data_len;
257 275
258 /* prevent tx timeout */ 276 /* prevent tx timeout */
259 netdev->trans_start = jiffies; 277 netdev->trans_start = jiffies;
@@ -289,7 +307,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
289 struct peak_usb_device *dev = netdev_priv(netdev); 307 struct peak_usb_device *dev = netdev_priv(netdev);
290 struct peak_tx_urb_context *context = NULL; 308 struct peak_tx_urb_context *context = NULL;
291 struct net_device_stats *stats = &netdev->stats; 309 struct net_device_stats *stats = &netdev->stats;
292 struct can_frame *cf = (struct can_frame *)skb->data; 310 struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
293 struct urb *urb; 311 struct urb *urb;
294 u8 *obuf; 312 u8 *obuf;
295 int i, err; 313 int i, err;
@@ -322,7 +340,9 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
322 } 340 }
323 341
324 context->echo_index = i; 342 context->echo_index = i;
325 context->dlc = cf->can_dlc; 343
344 /* Note: this works with CANFD frames too */
345 context->data_len = cfd->len;
326 346
327 usb_anchor_urb(urb, &dev->tx_submitted); 347 usb_anchor_urb(urb, &dev->tx_submitted);
328 348
@@ -679,19 +699,43 @@ static int peak_usb_set_mode(struct net_device *netdev, enum can_mode mode)
679} 699}
680 700
681/* 701/*
682 * candev callback used to set device bitrate. 702 * candev callback used to set device nominal/arbitration bitrate.
683 */ 703 */
684static int peak_usb_set_bittiming(struct net_device *netdev) 704static int peak_usb_set_bittiming(struct net_device *netdev)
685{ 705{
686 struct peak_usb_device *dev = netdev_priv(netdev); 706 struct peak_usb_device *dev = netdev_priv(netdev);
687 struct can_bittiming *bt = &dev->can.bittiming; 707 const struct peak_usb_adapter *pa = dev->adapter;
688 708
689 if (dev->adapter->dev_set_bittiming) { 709 if (pa->dev_set_bittiming) {
690 int err = dev->adapter->dev_set_bittiming(dev, bt); 710 struct can_bittiming *bt = &dev->can.bittiming;
711 int err = pa->dev_set_bittiming(dev, bt);
691 712
692 if (err) 713 if (err)
693 netdev_info(netdev, "couldn't set bitrate (err %d)\n", 714 netdev_info(netdev, "couldn't set bitrate (err %d)\n",
694 err); 715 err);
716 return err;
717 }
718
719 return 0;
720}
721
722/*
723 * candev callback used to set device data bitrate.
724 */
725static int peak_usb_set_data_bittiming(struct net_device *netdev)
726{
727 struct peak_usb_device *dev = netdev_priv(netdev);
728 const struct peak_usb_adapter *pa = dev->adapter;
729
730 if (pa->dev_set_data_bittiming) {
731 struct can_bittiming *bt = &dev->can.data_bittiming;
732 int err = pa->dev_set_data_bittiming(dev, bt);
733
734 if (err)
735 netdev_info(netdev,
736 "couldn't set data bitrate (err %d)\n",
737 err);
738
695 return err; 739 return err;
696 } 740 }
697 741
@@ -709,7 +753,7 @@ static const struct net_device_ops peak_usb_netdev_ops = {
709 * create one device which is attached to CAN controller #ctrl_idx of the 753 * create one device which is attached to CAN controller #ctrl_idx of the
710 * usb adapter. 754 * usb adapter.
711 */ 755 */
712static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, 756static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
713 struct usb_interface *intf, int ctrl_idx) 757 struct usb_interface *intf, int ctrl_idx)
714{ 758{
715 struct usb_device *usb_dev = interface_to_usbdev(intf); 759 struct usb_device *usb_dev = interface_to_usbdev(intf);
@@ -750,9 +794,11 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
750 dev->can.clock = peak_usb_adapter->clock; 794 dev->can.clock = peak_usb_adapter->clock;
751 dev->can.bittiming_const = &peak_usb_adapter->bittiming_const; 795 dev->can.bittiming_const = &peak_usb_adapter->bittiming_const;
752 dev->can.do_set_bittiming = peak_usb_set_bittiming; 796 dev->can.do_set_bittiming = peak_usb_set_bittiming;
797 dev->can.data_bittiming_const = &peak_usb_adapter->data_bittiming_const;
798 dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
753 dev->can.do_set_mode = peak_usb_set_mode; 799 dev->can.do_set_mode = peak_usb_set_mode;
754 dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | 800 dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
755 CAN_CTRLMODE_LISTENONLY; 801 dev->can.ctrlmode_supported = peak_usb_adapter->ctrlmode_supported;
756 802
757 netdev->netdev_ops = &peak_usb_netdev_ops; 803 netdev->netdev_ops = &peak_usb_netdev_ops;
758 804
@@ -857,17 +903,18 @@ static int peak_usb_probe(struct usb_interface *intf,
857{ 903{
858 struct usb_device *usb_dev = interface_to_usbdev(intf); 904 struct usb_device *usb_dev = interface_to_usbdev(intf);
859 const u16 usb_id_product = le16_to_cpu(usb_dev->descriptor.idProduct); 905 const u16 usb_id_product = le16_to_cpu(usb_dev->descriptor.idProduct);
860 struct peak_usb_adapter *peak_usb_adapter, **pp; 906 const struct peak_usb_adapter *peak_usb_adapter = NULL;
861 int i, err = -ENOMEM; 907 int i, err = -ENOMEM;
862 908
863 usb_dev = interface_to_usbdev(intf); 909 usb_dev = interface_to_usbdev(intf);
864 910
865 /* get corresponding PCAN-USB adapter */ 911 /* get corresponding PCAN-USB adapter */
866 for (pp = peak_usb_adapters_list; *pp; pp++) 912 for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++)
867 if ((*pp)->device_id == usb_id_product) 913 if (peak_usb_adapters_list[i]->device_id == usb_id_product) {
914 peak_usb_adapter = peak_usb_adapters_list[i];
868 break; 915 break;
916 }
869 917
870 peak_usb_adapter = *pp;
871 if (!peak_usb_adapter) { 918 if (!peak_usb_adapter) {
872 /* should never come except device_id bad usage in this file */ 919 /* should never come except device_id bad usage in this file */
873 pr_err("%s: didn't find device id. 0x%x in devices list\n", 920 pr_err("%s: didn't find device id. 0x%x in devices list\n",
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index 073b47ff8eee..9e624f05ad4d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -25,6 +25,8 @@
25/* supported device ids. */ 25/* supported device ids. */
26#define PCAN_USB_PRODUCT_ID 0x000c 26#define PCAN_USB_PRODUCT_ID 0x000c
27#define PCAN_USBPRO_PRODUCT_ID 0x000d 27#define PCAN_USBPRO_PRODUCT_ID 0x000d
28#define PCAN_USBPROFD_PRODUCT_ID 0x0011
29#define PCAN_USBFD_PRODUCT_ID 0x0012
28 30
29#define PCAN_USB_DRIVER_NAME "peak_usb" 31#define PCAN_USB_DRIVER_NAME "peak_usb"
30 32
@@ -44,8 +46,10 @@ struct peak_usb_device;
44struct peak_usb_adapter { 46struct peak_usb_adapter {
45 char *name; 47 char *name;
46 u32 device_id; 48 u32 device_id;
49 u32 ctrlmode_supported;
47 struct can_clock clock; 50 struct can_clock clock;
48 const struct can_bittiming_const bittiming_const; 51 const struct can_bittiming_const bittiming_const;
52 const struct can_bittiming_const data_bittiming_const;
49 unsigned int ctrl_count; 53 unsigned int ctrl_count;
50 54
51 int (*intf_probe)(struct usb_interface *intf); 55 int (*intf_probe)(struct usb_interface *intf);
@@ -57,6 +61,8 @@ struct peak_usb_adapter {
57 int (*dev_close)(struct peak_usb_device *dev); 61 int (*dev_close)(struct peak_usb_device *dev);
58 int (*dev_set_bittiming)(struct peak_usb_device *dev, 62 int (*dev_set_bittiming)(struct peak_usb_device *dev,
59 struct can_bittiming *bt); 63 struct can_bittiming *bt);
64 int (*dev_set_data_bittiming)(struct peak_usb_device *dev,
65 struct can_bittiming *bt);
60 int (*dev_set_bus)(struct peak_usb_device *dev, u8 onoff); 66 int (*dev_set_bus)(struct peak_usb_device *dev, u8 onoff);
61 int (*dev_get_device_id)(struct peak_usb_device *dev, u32 *device_id); 67 int (*dev_get_device_id)(struct peak_usb_device *dev, u32 *device_id);
62 int (*dev_decode_buf)(struct peak_usb_device *dev, struct urb *urb); 68 int (*dev_decode_buf)(struct peak_usb_device *dev, struct urb *urb);
@@ -66,6 +72,8 @@ struct peak_usb_adapter {
66 int (*dev_stop)(struct peak_usb_device *dev); 72 int (*dev_stop)(struct peak_usb_device *dev);
67 int (*dev_restart_async)(struct peak_usb_device *dev, struct urb *urb, 73 int (*dev_restart_async)(struct peak_usb_device *dev, struct urb *urb,
68 u8 *buf); 74 u8 *buf);
75 int (*do_get_berr_counter)(const struct net_device *netdev,
76 struct can_berr_counter *bec);
69 u8 ep_msg_in; 77 u8 ep_msg_in;
70 u8 ep_msg_out[PCAN_USB_MAX_CHANNEL]; 78 u8 ep_msg_out[PCAN_USB_MAX_CHANNEL];
71 u8 ts_used_bits; 79 u8 ts_used_bits;
@@ -78,21 +86,23 @@ struct peak_usb_adapter {
78 int sizeof_dev_private; 86 int sizeof_dev_private;
79}; 87};
80 88
81extern struct peak_usb_adapter pcan_usb; 89extern const struct peak_usb_adapter pcan_usb;
82extern struct peak_usb_adapter pcan_usb_pro; 90extern const struct peak_usb_adapter pcan_usb_pro;
91extern const struct peak_usb_adapter pcan_usb_fd;
92extern const struct peak_usb_adapter pcan_usb_pro_fd;
83 93
84struct peak_time_ref { 94struct peak_time_ref {
85 struct timeval tv_host_0, tv_host; 95 struct timeval tv_host_0, tv_host;
86 u32 ts_dev_1, ts_dev_2; 96 u32 ts_dev_1, ts_dev_2;
87 u64 ts_total; 97 u64 ts_total;
88 u32 tick_count; 98 u32 tick_count;
89 struct peak_usb_adapter *adapter; 99 const struct peak_usb_adapter *adapter;
90}; 100};
91 101
92struct peak_tx_urb_context { 102struct peak_tx_urb_context {
93 struct peak_usb_device *dev; 103 struct peak_usb_device *dev;
94 u32 echo_index; 104 u32 echo_index;
95 u8 dlc; 105 u8 data_len;
96 struct urb *urb; 106 struct urb *urb;
97}; 107};
98 108
@@ -102,7 +112,7 @@ struct peak_tx_urb_context {
102/* PEAK-System USB device */ 112/* PEAK-System USB device */
103struct peak_usb_device { 113struct peak_usb_device {
104 struct can_priv can; 114 struct can_priv can;
105 struct peak_usb_adapter *adapter; 115 const struct peak_usb_adapter *adapter;
106 unsigned int ctrl_idx; 116 unsigned int ctrl_idx;
107 u32 state; 117 u32 state;
108 118
@@ -134,12 +144,14 @@ void pcan_dump_mem(char *prompt, void *p, int l);
134 144
135/* common timestamp management */ 145/* common timestamp management */
136void peak_usb_init_time_ref(struct peak_time_ref *time_ref, 146void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
137 struct peak_usb_adapter *adapter); 147 const struct peak_usb_adapter *adapter);
138void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now); 148void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
139void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now); 149void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
140void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts, 150void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
141 struct timeval *tv); 151 struct timeval *tv);
142 152int peak_usb_netif_rx(struct sk_buff *skb,
153 struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high);
143void peak_usb_async_complete(struct urb *urb); 154void peak_usb_async_complete(struct urb *urb);
144void peak_usb_restart_complete(struct peak_usb_device *dev); 155void peak_usb_restart_complete(struct peak_usb_device *dev);
156
145#endif 157#endif
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
new file mode 100644
index 000000000000..962c3f027383
--- /dev/null
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -0,0 +1,1095 @@
1/*
2 * CAN driver for PEAK System PCAN-USB FD / PCAN-USB Pro FD adapter
3 *
4 * Copyright (C) 2013-2014 Stephane Grosjean <s.grosjean@peak-system.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published
8 * by the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15#include <linux/netdevice.h>
16#include <linux/usb.h>
17#include <linux/module.h>
18
19#include <linux/can.h>
20#include <linux/can/dev.h>
21#include <linux/can/error.h>
22
23#include "pcan_usb_core.h"
24#include "pcan_usb_pro.h"
25#include "pcan_ucan.h"
26
27MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB FD adapter");
28MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter");
29
30#define PCAN_USBPROFD_CHANNEL_COUNT 2
31#define PCAN_USBFD_CHANNEL_COUNT 1
32
33/* PCAN-USB Pro FD adapter internal clock (Hz) */
34#define PCAN_UFD_CRYSTAL_HZ 80000000
35
36#define PCAN_UFD_CMD_BUFFER_SIZE 512
37#define PCAN_UFD_LOSPD_PKT_SIZE 64
38
39/* PCAN-USB Pro FD command timeout (ms.) */
40#define PCAN_UFD_CMD_TIMEOUT_MS 1000
41
42/* PCAN-USB Pro FD rx/tx buffers size */
43#define PCAN_UFD_RX_BUFFER_SIZE 2048
44#define PCAN_UFD_TX_BUFFER_SIZE 512
45
/* read some versions info from the hw device (reply to INFO_FW request) */
struct __packed pcan_ufd_fw_info {
	__le16	size_of;	/* sizeof this */
	__le16	type;		/* type of this structure */
	u8	hw_type;	/* Type of hardware (HW_TYPE_xxx) */
	u8	bl_version[3];	/* Bootloader version */
	u8	hw_version;	/* Hardware version (PCB) */
	u8	fw_version[3];	/* Firmware version */
	__le32	dev_id[2];	/* "device id" per CAN */
	__le32	ser_no;		/* S/N */
	__le32	flags;		/* special functions */
};
58
/* handle device specific info used by the netdevices: one instance is
 * allocated by the first channel and shared by all channels of the same
 * USB adapter (see pcan_usb_fd_init()).
 */
struct pcan_usb_fd_if {
	struct peak_usb_device	*dev[PCAN_USB_MAX_CHANNEL];
	struct pcan_ufd_fw_info	fw_info;
	struct peak_time_ref	time_ref;
	int	cm_ignore_count;	/* calibration msgs left to skip */
	int	dev_opened_count;	/* nb of channels currently opened */
};
67
/* device information: per-channel private data, embedding the common
 * peak_usb_device part (retrieved back with container_of()).
 */
struct pcan_usb_fd_device {
	struct peak_usb_device	dev;		/* common part */
	struct can_berr_counter	bec;		/* cached error counters */
	struct pcan_usb_fd_if	*usb_if;	/* shared per-adapter data */
	u8	*cmd_buffer_addr;		/* shared command buffer */
};
75
76/* Extended USB commands (non uCAN commands) */
77
78/* Clock Modes command */
79#define PCAN_UFD_CMD_CLK_SET 0x80
80
81#define PCAN_UFD_CLK_80MHZ 0x0
82#define PCAN_UFD_CLK_60MHZ 0x1
83#define PCAN_UFD_CLK_40MHZ 0x2
84#define PCAN_UFD_CLK_30MHZ 0x3
85#define PCAN_UFD_CLK_24MHZ 0x4
86#define PCAN_UFD_CLK_20MHZ 0x5
87#define PCAN_UFD_CLK_DEF PCAN_UFD_CLK_80MHZ
88
/* payload of the PCAN_UFD_CMD_CLK_SET command */
struct __packed pcan_ufd_clock {
	__le16	opcode_channel;

	u8	mode;		/* one of PCAN_UFD_CLK_xxx */
	u8	unused[5];
};
95
96/* LED control command */
97#define PCAN_UFD_CMD_LED_SET 0x86
98
99#define PCAN_UFD_LED_DEV 0x00
100#define PCAN_UFD_LED_FAST 0x01
101#define PCAN_UFD_LED_SLOW 0x02
102#define PCAN_UFD_LED_ON 0x03
103#define PCAN_UFD_LED_OFF 0x04
104#define PCAN_UFD_LED_DEF PCAN_UFD_LED_DEV
105
/* payload of the PCAN_UFD_CMD_LED_SET command */
struct __packed pcan_ufd_led {
	__le16	opcode_channel;

	u8	mode;		/* one of PCAN_UFD_LED_xxx */
	u8	unused[5];
};
112
113/* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */
114#define PCAN_UFD_FLTEXT_CALIBRATION 0x8000
115
/* extended filter command payload (CMD_RX_FRAME_xxxABLE with extra
 * usb_mask field, see PCAN_UFD_FLTEXT_CALIBRATION)
 */
struct __packed pcan_ufd_filter_ext {
	__le16	opcode_channel;

	__le16	ext_mask;
	u16	unused;
	__le16	usb_mask;
};
123
124/* Extended usage of uCAN messages for PCAN-USB Pro FD */
125#define PCAN_UFD_MSG_CALIBRATION 0x100
126
/* PCAN_UFD_MSG_CALIBRATION record: periodic device timestamp used to
 * synchronize host and device clocks
 */
struct __packed pcan_ufd_ts_msg {
	__le16	size;
	__le16	type;
	__le32	ts_low;
	__le32	ts_high;
	__le16	usb_frame_index;
	u16	unused;
};
135
136#define PCAN_UFD_MSG_OVERRUN 0x101
137
138#define PCAN_UFD_OVMSG_CHANNEL(o) ((o)->channel & 0xf)
139
/* PCAN_UFD_MSG_OVERRUN record: device-side RX overflow notification */
struct __packed pcan_ufd_ovr_msg {
	__le16	size;
	__le16	type;
	__le32	ts_low;
	__le32	ts_high;
	u8	channel;	/* channel index in low nibble */
	u8	unused[3];
};
148
/* extract the channel index carried in an overrun message */
static inline int pufd_omsg_get_channel(struct pcan_ufd_ovr_msg *om)
{
	return PCAN_UFD_OVMSG_CHANNEL(om);
}
153
/* Clock mode frequency values (Hz), indexed by PCAN_UFD_CLK_xxx mode */
static const u32 pcan_usb_fd_clk_freq[6] = {
	[PCAN_UFD_CLK_80MHZ] = 80000000,
	[PCAN_UFD_CLK_60MHZ] = 60000000,
	[PCAN_UFD_CLK_40MHZ] = 40000000,
	[PCAN_UFD_CLK_30MHZ] = 30000000,
	[PCAN_UFD_CLK_24MHZ] = 24000000,
	[PCAN_UFD_CLK_20MHZ] = 20000000
};
163
164/* return a device USB interface */
165static inline
166struct pcan_usb_fd_if *pcan_usb_fd_dev_if(struct peak_usb_device *dev)
167{
168 struct pcan_usb_fd_device *pdev =
169 container_of(dev, struct pcan_usb_fd_device, dev);
170 return pdev->usb_if;
171}
172
173/* return a device USB commands buffer */
174static inline void *pcan_usb_fd_cmd_buffer(struct peak_usb_device *dev)
175{
176 struct pcan_usb_fd_device *pdev =
177 container_of(dev, struct pcan_usb_fd_device, dev);
178 return pdev->cmd_buffer_addr;
179}
180
/* send PCAN-USB Pro FD commands synchronously
 *
 * The command records have been accumulated in the shared command buffer
 * (pcan_usb_fd_cmd_buffer()); cmd_tail points one byte past the last
 * record written. Returns 0 on success (or silently when the device is
 * already unplugged), else the usb_bulk_msg() error code.
 */
static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
{
	void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
	int err;
	u8 *packet_ptr;
	int i, n = 1, packet_len;
	ptrdiff_t cmd_len;

	/* usb device unregistered? */
	if (!(dev->state & PCAN_USB_STATE_CONNECTED))
		return 0;

	/* if a packet is not filled completely by commands, the command list
	 * is terminated with an "end of collection" record.
	 */
	cmd_len = cmd_tail - cmd_head;
	if (cmd_len <= (PCAN_UFD_CMD_BUFFER_SIZE - sizeof(u64))) {
		memset(cmd_tail, 0xff, sizeof(u64));
		cmd_len += sizeof(u64);
	}

	packet_ptr = cmd_head;

	/* firmware is not able to re-assemble 512 bytes buffer in full-speed */
	if ((dev->udev->speed != USB_SPEED_HIGH) &&
	    (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
		/* split the transfer into 64-byte chunks */
		packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
		n += cmd_len / packet_len;
	} else {
		packet_len = cmd_len;
	}

	/* n >= 1, so the loop always runs and err is always assigned */
	for (i = 0; i < n; i++) {
		err = usb_bulk_msg(dev->udev,
				   usb_sndbulkpipe(dev->udev,
						   PCAN_USBPRO_EP_CMDOUT),
				   packet_ptr, packet_len,
				   NULL, PCAN_UFD_CMD_TIMEOUT_MS);
		if (err) {
			netdev_err(dev->netdev,
				   "sending command failure: %d\n", err);
			break;
		}

		packet_ptr += packet_len;
	}

	return err;
}
231
/* build the commands list in the given buffer, to enter operational mode
 *
 * Writes two records into buf: reset of both error counters, then the
 * mode command (listen-only or normal depending on ctrlmode).
 * Returns the number of bytes written into buf.
 */
static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
{
	struct pucan_wr_err_cnt *prc;
	struct pucan_command *cmd;
	u8 *pc = buf;

	/* 1st, reset error counters: */
	prc = (struct pucan_wr_err_cnt *)pc;
	prc->opcode_channel = pucan_cmd_opcode_channel(dev,
						       PUCAN_CMD_WR_ERR_CNT);

	/* select both counters */
	prc->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE|PUCAN_WRERRCNT_RE);

	/* and reset their values */
	prc->tx_counter = 0;
	prc->rx_counter = 0;

	/* moves the pointer forward */
	pc += sizeof(struct pucan_wr_err_cnt);

	/* next, go back to operational mode */
	cmd = (struct pucan_command *)pc;
	cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
				(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) ?
						PUCAN_CMD_LISTEN_ONLY_MODE :
						PUCAN_CMD_NORMAL_MODE);
	pc += sizeof(struct pucan_command);

	return pc - buf;
}
264
/* set CAN bus on/off
 *
 * onoff != 0: reset error counters and enter operational mode
 * onoff == 0: go back to reset mode
 */
static int pcan_usb_fd_set_bus(struct peak_usb_device *dev, u8 onoff)
{
	u8 *pc = pcan_usb_fd_cmd_buffer(dev);
	int l;

	if (onoff) {
		/* build the cmds list to enter operational mode */
		l = pcan_usb_fd_build_restart_cmd(dev, pc);
	} else {
		struct pucan_command *cmd = (struct pucan_command *)pc;

		/* build cmd to go back to reset mode */
		cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
							PUCAN_CMD_RESET_MODE);
		l = sizeof(struct pucan_command);
	}

	/* send the command (pc + l = tail, one byte past the last record) */
	return pcan_usb_fd_send_cmd(dev, pc + l);
}
286
/* set filtering masks:
 *
 * idx in range [0..63] selects a row #idx, all rows otherwise
 * mask in range [0..0xffffffff] defines up to 32 CANIDs in the row(s)
 *
 * Each bit of this 64 x 32 bits array defines a CANID value:
 *
 * bit[i,j] = 1 implies that CANID=(i x 32)+j will be received, while
 * bit[i,j] = 0 implies that CANID=(i x 32)+j will be discarded.
 */
static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx,
				      u32 mask)
{
	struct pucan_filter_std *cmd = pcan_usb_fd_cmd_buffer(dev);
	int i, n;

	/* select all rows when idx is out of range [0..63] */
	if ((idx < 0) || (idx >= (1 << PUCAN_FLTSTD_ROW_IDX_BITS))) {
		n = 1 << PUCAN_FLTSTD_ROW_IDX_BITS;
		idx = 0;

	/* select the row (and only the row) otherwise */
	} else {
		n = idx + 1;
	}

	/* one record per selected row; after the loop, cmd points one past
	 * the last record, which is the tail expected by send_cmd()
	 */
	for (i = idx; i < n; i++, cmd++) {
		cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
							PUCAN_CMD_FILTER_STD);
		cmd->idx = cpu_to_le16(i);
		cmd->mask = cpu_to_le32(mask);
	}

	/* send the command */
	return pcan_usb_fd_send_cmd(dev, cmd);
}
323
/* set/unset notifications filter:
 *
 * onoff sets(1)/unset(0) notifications
 * mask each bit defines a kind of notification to set/unset
 */
static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev,
				      bool onoff, u16 ext_mask, u16 usb_mask)
{
	struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev);

	cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
					(onoff) ? PUCAN_CMD_RX_FRAME_ENABLE :
						  PUCAN_CMD_RX_FRAME_DISABLE);

	cmd->ext_mask = cpu_to_le16(ext_mask);
	cmd->usb_mask = cpu_to_le16(usb_mask);

	/* send the single-record command list (++cmd = tail) */
	return pcan_usb_fd_send_cmd(dev, ++cmd);
}
344
345/* setup LED control */
346static int pcan_usb_fd_set_can_led(struct peak_usb_device *dev, u8 led_mode)
347{
348 struct pcan_ufd_led *cmd = pcan_usb_fd_cmd_buffer(dev);
349
350 cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
351 PCAN_UFD_CMD_LED_SET);
352 cmd->mode = led_mode;
353
354 /* send the command */
355 return pcan_usb_fd_send_cmd(dev, ++cmd);
356}
357
358/* set CAN clock domain */
359static int pcan_usb_fd_set_clock_domain(struct peak_usb_device *dev,
360 u8 clk_mode)
361{
362 struct pcan_ufd_clock *cmd = pcan_usb_fd_cmd_buffer(dev);
363
364 cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
365 PCAN_UFD_CMD_CLK_SET);
366 cmd->mode = clk_mode;
367
368 /* send the command */
369 return pcan_usb_fd_send_cmd(dev, ++cmd);
370}
371
/* set bittiming for CAN and CAN-FD header (nominal/arbitration phase) */
static int pcan_usb_fd_set_bittiming_slow(struct peak_usb_device *dev,
					  struct can_bittiming *bt)
{
	struct pucan_timing_slow *cmd = pcan_usb_fd_cmd_buffer(dev);

	cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
						       PUCAN_CMD_TIMING_SLOW);
	/* device registers hold (value - 1) */
	cmd->sjw_t = PUCAN_TSLOW_SJW_T(bt->sjw - 1,
				dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES);

	cmd->tseg2 = PUCAN_TSLOW_TSEG2(bt->phase_seg2 - 1);
	cmd->tseg1 = PUCAN_TSLOW_TSEG1(bt->prop_seg + bt->phase_seg1 - 1);
	cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(bt->brp - 1));

	cmd->ewl = 96;	/* default; presumably error warning limit — TODO confirm */

	/* send the command */
	return pcan_usb_fd_send_cmd(dev, ++cmd);
}
392
/* set CAN-FD bittiming for data phase */
static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev,
					  struct can_bittiming *bt)
{
	struct pucan_timing_fast *cmd = pcan_usb_fd_cmd_buffer(dev);

	cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
						       PUCAN_CMD_TIMING_FAST);
	/* device registers hold (value - 1) */
	cmd->sjw = PUCAN_TFAST_SJW(bt->sjw - 1);
	cmd->tseg2 = PUCAN_TFAST_TSEG2(bt->phase_seg2 - 1);
	cmd->tseg1 = PUCAN_TFAST_TSEG1(bt->prop_seg + bt->phase_seg1 - 1);
	cmd->brp = cpu_to_le16(PUCAN_TFAST_BRP(bt->brp - 1));

	/* send the command */
	return pcan_usb_fd_send_cmd(dev, ++cmd);
}
409
/* handle restart but in asynchronously way
 * (uses PCAN-USB Pro code to complete asynchronous request)
 *
 * Uses GFP_ATOMIC so it is safe to call from a non-sleeping context.
 */
static int pcan_usb_fd_restart_async(struct peak_usb_device *dev,
				     struct urb *urb, u8 *buf)
{
	u8 *pc = buf;

	/* build the entire cmds list in the provided buffer, to go back into
	 * operational mode.
	 */
	pc += pcan_usb_fd_build_restart_cmd(dev, pc);

	/* add EOC (end-of-collection record, all 0xff) */
	memset(pc, 0xff, sizeof(struct pucan_command));
	pc += sizeof(struct pucan_command);

	/* complete the URB */
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT),
			  buf, pc - buf,
			  pcan_usb_pro_restart_complete, dev);

	/* and submit it. */
	return usb_submit_urb(urb, GFP_ATOMIC);
}
436
/* tell the device firmware whether a host driver is bound (loaded=1) or
 * being unbound (loaded=0), through the PCAN-USB Pro request transport
 */
static int pcan_usb_fd_drv_loaded(struct peak_usb_device *dev, bool loaded)
{
	struct pcan_usb_fd_device *pdev =
			container_of(dev, struct pcan_usb_fd_device, dev);

	/* 2-byte payload: [0] = 0, [1] = loaded flag */
	pdev->cmd_buffer_addr[0] = 0;
	pdev->cmd_buffer_addr[1] = !!loaded;

	return pcan_usb_pro_send_req(dev,
				     PCAN_USBPRO_REQ_FCT,
				     PCAN_USBPRO_FCT_DRVLD,
				     pdev->cmd_buffer_addr,
				     PCAN_USBPRO_FCT_DRVLD_REQ_LEN);
}
451
452static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
453 struct pucan_msg *rx_msg)
454{
455 struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg;
456 struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)];
457 struct net_device *netdev = dev->netdev;
458 struct canfd_frame *cfd;
459 struct sk_buff *skb;
460 const u16 rx_msg_flags = le16_to_cpu(rm->flags);
461
462 if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
463 /* CANFD frame case */
464 skb = alloc_canfd_skb(netdev, &cfd);
465 if (!skb)
466 return -ENOMEM;
467
468 if (rx_msg_flags & PUCAN_MSG_BITRATE_SWITCH)
469 cfd->flags |= CANFD_BRS;
470
471 if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND)
472 cfd->flags |= CANFD_ESI;
473
474 cfd->len = can_dlc2len(get_canfd_dlc(pucan_msg_get_dlc(rm)));
475 } else {
476 /* CAN 2.0 frame case */
477 skb = alloc_can_skb(netdev, (struct can_frame **)&cfd);
478 if (!skb)
479 return -ENOMEM;
480
481 cfd->len = get_can_dlc(pucan_msg_get_dlc(rm));
482 }
483
484 cfd->can_id = le32_to_cpu(rm->can_id);
485
486 if (rx_msg_flags & PUCAN_MSG_EXT_ID)
487 cfd->can_id |= CAN_EFF_FLAG;
488
489 if (rx_msg_flags & PUCAN_MSG_RTR)
490 cfd->can_id |= CAN_RTR_FLAG;
491 else
492 memcpy(cfd->data, rm->d, cfd->len);
493
494 peak_usb_netif_rx(skb, &usb_if->time_ref,
495 le32_to_cpu(rm->ts_low), le32_to_cpu(rm->ts_high));
496
497 netdev->stats.rx_packets++;
498 netdev->stats.rx_bytes += cfd->len;
499
500 return 0;
501}
502
503/* handle uCAN status message */
504static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
505 struct pucan_msg *rx_msg)
506{
507 struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg;
508 struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
509 struct pcan_usb_fd_device *pdev =
510 container_of(dev, struct pcan_usb_fd_device, dev);
511 enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
512 enum can_state rx_state, tx_state;
513 struct net_device *netdev = dev->netdev;
514 struct can_frame *cf;
515 struct sk_buff *skb;
516
517 /* nothing should be sent while in BUS_OFF state */
518 if (dev->can.state == CAN_STATE_BUS_OFF)
519 return 0;
520
521 if (sm->channel_p_w_b & PUCAN_BUS_BUSOFF) {
522 new_state = CAN_STATE_BUS_OFF;
523 } else if (sm->channel_p_w_b & PUCAN_BUS_PASSIVE) {
524 new_state = CAN_STATE_ERROR_PASSIVE;
525 } else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
526 new_state = CAN_STATE_ERROR_WARNING;
527 } else {
528 /* no error bit (so, no error skb, back to active state) */
529 dev->can.state = CAN_STATE_ERROR_ACTIVE;
530 pdev->bec.txerr = 0;
531 pdev->bec.rxerr = 0;
532 return 0;
533 }
534
535 /* state hasn't changed */
536 if (new_state == dev->can.state)
537 return 0;
538
539 /* handle bus state change */
540 tx_state = (pdev->bec.txerr >= pdev->bec.rxerr) ? new_state : 0;
541 rx_state = (pdev->bec.txerr <= pdev->bec.rxerr) ? new_state : 0;
542
543 /* allocate an skb to store the error frame */
544 skb = alloc_can_err_skb(netdev, &cf);
545 if (skb)
546 can_change_state(netdev, cf, tx_state, rx_state);
547
548 /* things must be done even in case of OOM */
549 if (new_state == CAN_STATE_BUS_OFF)
550 can_bus_off(netdev);
551
552 if (!skb)
553 return -ENOMEM;
554
555 peak_usb_netif_rx(skb, &usb_if->time_ref,
556 le32_to_cpu(sm->ts_low), le32_to_cpu(sm->ts_high));
557
558 netdev->stats.rx_packets++;
559 netdev->stats.rx_bytes += cf->can_dlc;
560
561 return 0;
562}
563
564/* handle uCAN error message */
565static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if,
566 struct pucan_msg *rx_msg)
567{
568 struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg;
569 struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)];
570 struct pcan_usb_fd_device *pdev =
571 container_of(dev, struct pcan_usb_fd_device, dev);
572
573 /* keep a trace of tx and rx error counters for later use */
574 pdev->bec.txerr = er->tx_err_cnt;
575 pdev->bec.rxerr = er->rx_err_cnt;
576
577 return 0;
578}
579
/* handle uCAN overrun message: push a controller RX-overflow error frame
 * to the network layer and bump the overrun statistics
 */
static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
				      struct pucan_msg *rx_msg)
{
	struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg;
	struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)];
	struct net_device *netdev = dev->netdev;
	struct can_frame *cf;
	struct sk_buff *skb;

	/* allocate an skb to store the error frame */
	skb = alloc_can_err_skb(netdev, &cf);
	if (!skb)
		return -ENOMEM;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;

	/* skb ownership goes to the network layer here */
	peak_usb_netif_rx(skb, &usb_if->time_ref,
			  le32_to_cpu(ov->ts_low), le32_to_cpu(ov->ts_high));

	/* only netdev (still owned by us) is touched after the handoff */
	netdev->stats.rx_over_errors++;
	netdev->stats.rx_errors++;

	return 0;
}
606
607/* handle USB calibration message */
608static void pcan_usb_fd_decode_ts(struct pcan_usb_fd_if *usb_if,
609 struct pucan_msg *rx_msg)
610{
611 struct pcan_ufd_ts_msg *ts = (struct pcan_ufd_ts_msg *)rx_msg;
612
613 /* should wait until clock is stabilized */
614 if (usb_if->cm_ignore_count > 0)
615 usb_if->cm_ignore_count--;
616 else
617 peak_usb_set_ts_now(&usb_if->time_ref, le32_to_cpu(ts->ts_low));
618}
619
/* callback for bulk IN urb: walk the list of uCAN records contained in the
 * received USB packet and dispatch each to its decoder.
 *
 * Returns 0 on success or a negative error code; on error, the whole
 * buffer is dumped for debugging.
 */
static int pcan_usb_fd_decode_buf(struct peak_usb_device *dev, struct urb *urb)
{
	struct pcan_usb_fd_if *usb_if = pcan_usb_fd_dev_if(dev);
	struct net_device *netdev = dev->netdev;
	struct pucan_msg *rx_msg;
	u8 *msg_ptr, *msg_end;
	int err = 0;

	/* loop reading all the records from the incoming message */
	msg_ptr = urb->transfer_buffer;
	msg_end = urb->transfer_buffer + urb->actual_length;
	for (; msg_ptr < msg_end;) {
		u16 rx_msg_type, rx_msg_size;

		rx_msg = (struct pucan_msg *)msg_ptr;
		if (!rx_msg->size) {
			/* null packet found: end of list */
			break;
		}

		rx_msg_size = le16_to_cpu(rx_msg->size);
		rx_msg_type = le16_to_cpu(rx_msg->type);

		/* check if the record goes out of current packet */
		if (msg_ptr + rx_msg_size > msg_end) {
			netdev_err(netdev,
				   "got frag rec: should inc usb rx buf sze\n");
			err = -EBADMSG;
			break;
		}

		switch (rx_msg_type) {
		case PUCAN_MSG_CAN_RX:
			err = pcan_usb_fd_decode_canmsg(usb_if, rx_msg);
			if (err < 0)
				goto fail;
			break;

		case PCAN_UFD_MSG_CALIBRATION:
			pcan_usb_fd_decode_ts(usb_if, rx_msg);
			break;

		case PUCAN_MSG_ERROR:
			err = pcan_usb_fd_decode_error(usb_if, rx_msg);
			if (err < 0)
				goto fail;
			break;

		case PUCAN_MSG_STATUS:
			err = pcan_usb_fd_decode_status(usb_if, rx_msg);
			if (err < 0)
				goto fail;
			break;

		case PCAN_UFD_MSG_OVERRUN:
			err = pcan_usb_fd_decode_overrun(usb_if, rx_msg);
			if (err < 0)
				goto fail;
			break;

		default:
			/* unknown record types are skipped, not fatal */
			netdev_err(netdev,
				   "unhandled msg type 0x%02x (%d): ignored\n",
				   rx_msg_type, rx_msg_type);
			break;
		}

		msg_ptr += rx_msg_size;
	}

fail:
	if (err)
		pcan_dump_mem("received msg",
			      urb->transfer_buffer, urb->actual_length);
	return err;
}
697
/* CAN/CANFD frames encoding callback: converts the skb's can(fd)_frame
 * into a uCAN TX record in obuf and appends a null-size record as the
 * end-of-list tag; *size receives the total number of bytes to send.
 * Assumes obuf can hold the aligned record plus the 4-byte tag.
 */
static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
				  struct sk_buff *skb, u8 *obuf, size_t *size)
{
	struct pucan_tx_msg *tx_msg = (struct pucan_tx_msg *)obuf;
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	u16 tx_msg_size, tx_msg_flags;
	u8 can_dlc;

	/* records are 32-bit aligned */
	tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4);
	tx_msg->size = cpu_to_le16(tx_msg_size);
	tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);

	tx_msg_flags = 0;
	if (cfd->can_id & CAN_EFF_FLAG) {
		tx_msg_flags |= PUCAN_MSG_EXT_ID;
		tx_msg->can_id = cpu_to_le32(cfd->can_id & CAN_EFF_MASK);
	} else {
		tx_msg->can_id = cpu_to_le32(cfd->can_id & CAN_SFF_MASK);
	}

	if (can_is_canfd_skb(skb)) {
		/* considering a CANFD frame */
		can_dlc = can_len2dlc(cfd->len);

		tx_msg_flags |= PUCAN_MSG_EXT_DATA_LEN;

		if (cfd->flags & CANFD_BRS)
			tx_msg_flags |= PUCAN_MSG_BITRATE_SWITCH;

		if (cfd->flags & CANFD_ESI)
			tx_msg_flags |= PUCAN_MSG_ERROR_STATE_IND;
	} else {
		/* CAN 2.0 frames */
		can_dlc = cfd->len;

		if (cfd->can_id & CAN_RTR_FLAG)
			tx_msg_flags |= PUCAN_MSG_RTR;
	}

	tx_msg->flags = cpu_to_le16(tx_msg_flags);
	tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, can_dlc);
	memcpy(tx_msg->d, cfd->data, cfd->len);

	/* add null size message to tag the end (messages are 32-bits aligned)
	 */
	tx_msg = (struct pucan_tx_msg *)(obuf + tx_msg_size);

	tx_msg->size = 0;

	/* set the whole size of the USB packet to send */
	*size = tx_msg_size + sizeof(u32);

	return 0;
}
753
754/* start the interface (last chance before set bus on) */
755static int pcan_usb_fd_start(struct peak_usb_device *dev)
756{
757 struct pcan_usb_fd_device *pdev =
758 container_of(dev, struct pcan_usb_fd_device, dev);
759 int err;
760
761 /* set filter mode: all acceptance */
762 err = pcan_usb_fd_set_filter_std(dev, -1, 0xffffffff);
763 if (err)
764 return err;
765
766 /* opening first device: */
767 if (pdev->usb_if->dev_opened_count == 0) {
768 /* reset time_ref */
769 peak_usb_init_time_ref(&pdev->usb_if->time_ref,
770 &pcan_usb_pro_fd);
771
772 /* enable USB calibration messages */
773 err = pcan_usb_fd_set_filter_ext(dev, 1,
774 PUCAN_FLTEXT_ERROR,
775 PCAN_UFD_FLTEXT_CALIBRATION);
776 }
777
778 pdev->usb_if->dev_opened_count++;
779
780 /* reset cached error counters */
781 pdev->bec.txerr = 0;
782 pdev->bec.rxerr = 0;
783
784 return err;
785}
786
787/* socket callback used to copy berr counters values receieved through USB */
788static int pcan_usb_fd_get_berr_counter(const struct net_device *netdev,
789 struct can_berr_counter *bec)
790{
791 struct peak_usb_device *dev = netdev_priv(netdev);
792 struct pcan_usb_fd_device *pdev =
793 container_of(dev, struct pcan_usb_fd_device, dev);
794
795 *bec = pdev->bec;
796
797 /* must return 0 */
798 return 0;
799}
800
/* stop interface (last chance before set bus off) */
static int pcan_usb_fd_stop(struct peak_usb_device *dev)
{
	struct pcan_usb_fd_device *pdev =
			container_of(dev, struct pcan_usb_fd_device, dev);

	/* turn off special msgs for that interface if no other dev opened */
	if (pdev->usb_if->dev_opened_count == 1)
		pcan_usb_fd_set_filter_ext(dev, 0,
					   PUCAN_FLTEXT_ERROR,
					   PCAN_UFD_FLTEXT_CALIBRATION);
	/* NOTE(review): assumes open/close calls are balanced by the core,
	 * otherwise the count could go negative — TODO confirm
	 */
	pdev->usb_if->dev_opened_count--;

	return 0;
}
816
/* called when probing, to initialize a device object.
 *
 * The first channel allocates the shared pcan_usb_fd_if structure and the
 * command buffer, reads the firmware info and tells the device the driver
 * is loaded; subsequent channels simply reuse the sibling's pointers.
 * Returns 0 on success or a negative error code.
 */
static int pcan_usb_fd_init(struct peak_usb_device *dev)
{
	struct pcan_usb_fd_device *pdev =
			container_of(dev, struct pcan_usb_fd_device, dev);
	int i, err = -ENOMEM;

	/* do this for 1st channel only */
	if (!dev->prev_siblings) {
		/* allocate netdevices common structure attached to first one */
		pdev->usb_if = kzalloc(sizeof(*pdev->usb_if), GFP_KERNEL);
		if (!pdev->usb_if)
			goto err_out;

		/* allocate command buffer once for all for the interface */
		pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
						GFP_KERNEL);
		if (!pdev->cmd_buffer_addr)
			goto err_out_1;

		/* number of ts msgs to ignore before taking one into account */
		pdev->usb_if->cm_ignore_count = 5;

		err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
					    PCAN_USBPRO_INFO_FW,
					    &pdev->usb_if->fw_info,
					    sizeof(pdev->usb_if->fw_info));
		if (err) {
			dev_err(dev->netdev->dev.parent,
				"unable to read %s firmware info (err %d)\n",
				dev->adapter->name, err);
			goto err_out_2;
		}

		/* explicit use of dev_xxx() instead of netdev_xxx() here:
		 * information displayed are related to the device itself, not
		 * to the canx (channel) device.
		 */
		dev_info(dev->netdev->dev.parent,
			 "PEAK-System %s v%u fw v%u.%u.%u (%u channels)\n",
			 dev->adapter->name, pdev->usb_if->fw_info.hw_version,
			 pdev->usb_if->fw_info.fw_version[0],
			 pdev->usb_if->fw_info.fw_version[1],
			 pdev->usb_if->fw_info.fw_version[2],
			 dev->adapter->ctrl_count);

		/* the currently supported hw is non-ISO */
		dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;

		/* tell the hardware the can driver is running */
		err = pcan_usb_fd_drv_loaded(dev, 1);
		if (err) {
			dev_err(dev->netdev->dev.parent,
				"unable to tell %s driver is loaded (err %d)\n",
				dev->adapter->name, err);
			goto err_out_2;
		}
	} else {
		/* otherwise, simply copy previous sibling's values */
		struct pcan_usb_fd_device *ppdev =
			container_of(dev->prev_siblings,
				     struct pcan_usb_fd_device, dev);

		pdev->usb_if = ppdev->usb_if;
		pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr;
	}

	pdev->usb_if->dev[dev->ctrl_idx] = dev;
	dev->device_number =
		le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);

	/* set clock domain: find the mode matching the adapter clock freq */
	for (i = 0; i < ARRAY_SIZE(pcan_usb_fd_clk_freq); i++)
		if (dev->adapter->clock.freq == pcan_usb_fd_clk_freq[i])
			break;

	if (i >= ARRAY_SIZE(pcan_usb_fd_clk_freq)) {
		dev_warn(dev->netdev->dev.parent,
			 "incompatible clock frequencies\n");
		err = -EINVAL;
		goto err_out_2;
	}

	pcan_usb_fd_set_clock_domain(dev, i);

	/* set LED in default state (end of init phase) */
	pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_DEF);

	return 0;

	/* NOTE(review): for a sibling channel, err_out_2 frees buffers shared
	 * with the first channel; this path looks unreachable for siblings
	 * since channel #0 would have failed the same clock check first —
	 * TODO confirm
	 */
err_out_2:
	kfree(pdev->cmd_buffer_addr);
err_out_1:
	kfree(pdev->usb_if);
err_out:
	return err;
}
914
915/* called when driver module is being unloaded */
916static void pcan_usb_fd_exit(struct peak_usb_device *dev)
917{
918 struct pcan_usb_fd_device *pdev =
919 container_of(dev, struct pcan_usb_fd_device, dev);
920
921 /* when rmmod called before unplug and if down, should reset things
922 * before leaving
923 */
924 if (dev->can.state != CAN_STATE_STOPPED) {
925 /* set bus off on the corresponding channel */
926 pcan_usb_fd_set_bus(dev, 0);
927 }
928
929 /* switch off corresponding CAN LEDs */
930 pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_OFF);
931
932 /* if channel #0 (only) */
933 if (dev->ctrl_idx == 0) {
934 /* turn off calibration message if any device were opened */
935 if (pdev->usb_if->dev_opened_count > 0)
936 pcan_usb_fd_set_filter_ext(dev, 0,
937 PUCAN_FLTEXT_ERROR,
938 PCAN_UFD_FLTEXT_CALIBRATION);
939
940 /* tell USB adapter that the driver is being unloaded */
941 pcan_usb_fd_drv_loaded(dev, 0);
942 }
943}
944
945/* called when the USB adapter is unplugged */
946static void pcan_usb_fd_free(struct peak_usb_device *dev)
947{
948 /* last device: can free shared objects now */
949 if (!dev->prev_siblings && !dev->next_siblings) {
950 struct pcan_usb_fd_device *pdev =
951 container_of(dev, struct pcan_usb_fd_device, dev);
952
953 /* free commands buffer */
954 kfree(pdev->cmd_buffer_addr);
955
956 /* free usb interface object */
957 kfree(pdev->usb_if);
958 }
959}
960
961/* describes the PCAN-USB FD adapter */
962const struct peak_usb_adapter pcan_usb_fd = {
963 .name = "PCAN-USB FD",
964 .device_id = PCAN_USBFD_PRODUCT_ID,
965 .ctrl_count = PCAN_USBFD_CHANNEL_COUNT,
966 .ctrlmode_supported = CAN_CTRLMODE_FD |
967 CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
968 .clock = {
969 .freq = PCAN_UFD_CRYSTAL_HZ,
970 },
971 .bittiming_const = {
972 .name = "pcan_usb_fd",
973 .tseg1_min = 1,
974 .tseg1_max = 64,
975 .tseg2_min = 1,
976 .tseg2_max = 16,
977 .sjw_max = 16,
978 .brp_min = 1,
979 .brp_max = 1024,
980 .brp_inc = 1,
981 },
982 .data_bittiming_const = {
983 .name = "pcan_usb_fd",
984 .tseg1_min = 1,
985 .tseg1_max = 16,
986 .tseg2_min = 1,
987 .tseg2_max = 8,
988 .sjw_max = 4,
989 .brp_min = 1,
990 .brp_max = 1024,
991 .brp_inc = 1,
992 },
993
994 /* size of device private data */
995 .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
996
997 /* timestamps usage */
998 .ts_used_bits = 32,
999 .ts_period = 1000000, /* calibration period in ts. */
1000 .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
1001 .us_per_ts_shift = 0,
1002
1003 /* give here messages in/out endpoints */
1004 .ep_msg_in = PCAN_USBPRO_EP_MSGIN,
1005 .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0},
1006
1007 /* size of rx/tx usb buffers */
1008 .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
1009 .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
1010
1011 /* device callbacks */
1012 .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */
1013 .dev_init = pcan_usb_fd_init,
1014
1015 .dev_exit = pcan_usb_fd_exit,
1016 .dev_free = pcan_usb_fd_free,
1017 .dev_set_bus = pcan_usb_fd_set_bus,
1018 .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
1019 .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
1020 .dev_decode_buf = pcan_usb_fd_decode_buf,
1021 .dev_start = pcan_usb_fd_start,
1022 .dev_stop = pcan_usb_fd_stop,
1023 .dev_restart_async = pcan_usb_fd_restart_async,
1024 .dev_encode_msg = pcan_usb_fd_encode_msg,
1025
1026 .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
1027};
1028
1029/* describes the PCAN-USB Pro FD adapter */
1030const struct peak_usb_adapter pcan_usb_pro_fd = {
1031 .name = "PCAN-USB Pro FD",
1032 .device_id = PCAN_USBPROFD_PRODUCT_ID,
1033 .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT,
1034 .ctrlmode_supported = CAN_CTRLMODE_FD |
1035 CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
1036 .clock = {
1037 .freq = PCAN_UFD_CRYSTAL_HZ,
1038 },
1039 .bittiming_const = {
1040 .name = "pcan_usb_pro_fd",
1041 .tseg1_min = 1,
1042 .tseg1_max = 64,
1043 .tseg2_min = 1,
1044 .tseg2_max = 16,
1045 .sjw_max = 16,
1046 .brp_min = 1,
1047 .brp_max = 1024,
1048 .brp_inc = 1,
1049 },
1050 .data_bittiming_const = {
1051 .name = "pcan_usb_pro_fd",
1052 .tseg1_min = 1,
1053 .tseg1_max = 16,
1054 .tseg2_min = 1,
1055 .tseg2_max = 8,
1056 .sjw_max = 4,
1057 .brp_min = 1,
1058 .brp_max = 1024,
1059 .brp_inc = 1,
1060 },
1061
1062 /* size of device private data */
1063 .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
1064
1065 /* timestamps usage */
1066 .ts_used_bits = 32,
1067 .ts_period = 1000000, /* calibration period in ts. */
1068 .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
1069 .us_per_ts_shift = 0,
1070
1071 /* give here messages in/out endpoints */
1072 .ep_msg_in = PCAN_USBPRO_EP_MSGIN,
1073 .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1},
1074
1075 /* size of rx/tx usb buffers */
1076 .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
1077 .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
1078
1079 /* device callbacks */
1080 .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */
1081 .dev_init = pcan_usb_fd_init,
1082
1083 .dev_exit = pcan_usb_fd_exit,
1084 .dev_free = pcan_usb_fd_free,
1085 .dev_set_bus = pcan_usb_fd_set_bus,
1086 .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
1087 .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
1088 .dev_decode_buf = pcan_usb_fd_decode_buf,
1089 .dev_start = pcan_usb_fd_start,
1090 .dev_stop = pcan_usb_fd_stop,
1091 .dev_restart_async = pcan_usb_fd_restart_async,
1092 .dev_encode_msg = pcan_usb_fd_encode_msg,
1093
1094 .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
1095};
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 4cfa3b8605b1..dec51717635e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -27,14 +27,6 @@
27 27
28MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro adapter"); 28MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro adapter");
29 29
30/* PCAN-USB Pro Endpoints */
31#define PCAN_USBPRO_EP_CMDOUT 1
32#define PCAN_USBPRO_EP_CMDIN (PCAN_USBPRO_EP_CMDOUT | USB_DIR_IN)
33#define PCAN_USBPRO_EP_MSGOUT_0 2
34#define PCAN_USBPRO_EP_MSGIN (PCAN_USBPRO_EP_MSGOUT_0 | USB_DIR_IN)
35#define PCAN_USBPRO_EP_MSGOUT_1 3
36#define PCAN_USBPRO_EP_UNUSED (PCAN_USBPRO_EP_MSGOUT_1 | USB_DIR_IN)
37
38#define PCAN_USBPRO_CHANNEL_COUNT 2 30#define PCAN_USBPRO_CHANNEL_COUNT 2
39 31
40/* PCAN-USB Pro adapter internal clock (MHz) */ 32/* PCAN-USB Pro adapter internal clock (MHz) */
@@ -322,8 +314,8 @@ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev,
322 return (i >= PCAN_USBPRO_RSP_SUBMIT_MAX) ? -ERANGE : err; 314 return (i >= PCAN_USBPRO_RSP_SUBMIT_MAX) ? -ERANGE : err;
323} 315}
324 316
325static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id, 317int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
326 int req_value, void *req_addr, int req_size) 318 int req_value, void *req_addr, int req_size)
327{ 319{
328 int err; 320 int err;
329 u8 req_type; 321 u8 req_type;
@@ -475,7 +467,7 @@ static int pcan_usb_pro_set_bittiming(struct peak_usb_device *dev,
475 return pcan_usb_pro_set_bitrate(dev, ccbt); 467 return pcan_usb_pro_set_bitrate(dev, ccbt);
476} 468}
477 469
478static void pcan_usb_pro_restart_complete(struct urb *urb) 470void pcan_usb_pro_restart_complete(struct urb *urb)
479{ 471{
480 /* can delete usb resources */ 472 /* can delete usb resources */
481 peak_usb_async_complete(urb); 473 peak_usb_async_complete(urb);
@@ -634,6 +626,7 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
634 switch (new_state) { 626 switch (new_state) {
635 case CAN_STATE_BUS_OFF: 627 case CAN_STATE_BUS_OFF:
636 can_frame->can_id |= CAN_ERR_BUSOFF; 628 can_frame->can_id |= CAN_ERR_BUSOFF;
629 dev->can.can_stats.bus_off++;
637 can_bus_off(netdev); 630 can_bus_off(netdev);
638 break; 631 break;
639 632
@@ -977,7 +970,7 @@ static void pcan_usb_pro_free(struct peak_usb_device *dev)
977/* 970/*
978 * probe function for new PCAN-USB Pro usb interface 971 * probe function for new PCAN-USB Pro usb interface
979 */ 972 */
980static int pcan_usb_pro_probe(struct usb_interface *intf) 973int pcan_usb_pro_probe(struct usb_interface *intf)
981{ 974{
982 struct usb_host_interface *if_desc; 975 struct usb_host_interface *if_desc;
983 int i; 976 int i;
@@ -1011,10 +1004,11 @@ static int pcan_usb_pro_probe(struct usb_interface *intf)
1011/* 1004/*
1012 * describe the PCAN-USB Pro adapter 1005 * describe the PCAN-USB Pro adapter
1013 */ 1006 */
1014struct peak_usb_adapter pcan_usb_pro = { 1007const struct peak_usb_adapter pcan_usb_pro = {
1015 .name = "PCAN-USB Pro", 1008 .name = "PCAN-USB Pro",
1016 .device_id = PCAN_USBPRO_PRODUCT_ID, 1009 .device_id = PCAN_USBPRO_PRODUCT_ID,
1017 .ctrl_count = PCAN_USBPRO_CHANNEL_COUNT, 1010 .ctrl_count = PCAN_USBPRO_CHANNEL_COUNT,
1011 .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
1018 .clock = { 1012 .clock = {
1019 .freq = PCAN_USBPRO_CRYSTAL_HZ, 1013 .freq = PCAN_USBPRO_CRYSTAL_HZ,
1020 }, 1014 },
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index 837cee267132..a62f7ab8980f 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -27,6 +27,14 @@
27#define PCAN_USBPRO_INFO_BL 0 27#define PCAN_USBPRO_INFO_BL 0
28#define PCAN_USBPRO_INFO_FW 1 28#define PCAN_USBPRO_INFO_FW 1
29 29
30/* PCAN-USB Pro (FD) Endpoints */
31#define PCAN_USBPRO_EP_CMDOUT 1
32#define PCAN_USBPRO_EP_CMDIN (PCAN_USBPRO_EP_CMDOUT | USB_DIR_IN)
33#define PCAN_USBPRO_EP_MSGOUT_0 2
34#define PCAN_USBPRO_EP_MSGIN (PCAN_USBPRO_EP_MSGOUT_0 | USB_DIR_IN)
35#define PCAN_USBPRO_EP_MSGOUT_1 3
36#define PCAN_USBPRO_EP_UNUSED (PCAN_USBPRO_EP_MSGOUT_1 | USB_DIR_IN)
37
30/* Vendor Request value for XXX_FCT */ 38/* Vendor Request value for XXX_FCT */
31#define PCAN_USBPRO_FCT_DRVLD 5 /* tell device driver is loaded */ 39#define PCAN_USBPRO_FCT_DRVLD 5 /* tell device driver is loaded */
32#define PCAN_USBPRO_FCT_DRVLD_REQ_LEN 16 40#define PCAN_USBPRO_FCT_DRVLD_REQ_LEN 16
@@ -176,4 +184,9 @@ union pcan_usb_pro_rec {
176 struct pcan_usb_pro_txmsg tx_msg; 184 struct pcan_usb_pro_txmsg tx_msg;
177}; 185};
178 186
187int pcan_usb_pro_probe(struct usb_interface *intf);
188int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
189 int req_value, void *req_addr, int req_size);
190void pcan_usb_pro_restart_complete(struct urb *urb);
191
179#endif 192#endif
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index ef674ecb82f8..dd52c7a4c80d 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -377,6 +377,7 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
377 case USB_8DEV_STATUSMSG_BUSOFF: 377 case USB_8DEV_STATUSMSG_BUSOFF:
378 priv->can.state = CAN_STATE_BUS_OFF; 378 priv->can.state = CAN_STATE_BUS_OFF;
379 cf->can_id |= CAN_ERR_BUSOFF; 379 cf->can_id |= CAN_ERR_BUSOFF;
380 priv->can.can_stats.bus_off++;
380 can_bus_off(priv->netdev); 381 can_bus_off(priv->netdev);
381 break; 382 break;
382 case USB_8DEV_STATUSMSG_OVERRUN: 383 case USB_8DEV_STATUSMSG_OVERRUN:
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index feb29c4526f7..4daffb284931 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -233,6 +233,35 @@ static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
233 core_writel(priv, reg, CORE_EEE_EN_CTRL); 233 core_writel(priv, reg, CORE_EEE_EN_CTRL);
234} 234}
235 235
236static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
237{
238 struct bcm_sf2_priv *priv = ds_to_priv(ds);
239 u32 reg;
240
241 reg = reg_readl(priv, REG_SPHY_CNTRL);
242 if (enable) {
243 reg |= PHY_RESET;
244 reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
245 reg_writel(priv, reg, REG_SPHY_CNTRL);
246 udelay(21);
247 reg = reg_readl(priv, REG_SPHY_CNTRL);
248 reg &= ~PHY_RESET;
249 } else {
250 reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
251 reg_writel(priv, reg, REG_SPHY_CNTRL);
252 mdelay(1);
253 reg |= CK25_DIS;
254 }
255 reg_writel(priv, reg, REG_SPHY_CNTRL);
256
257 /* Use PHY-driven LED signaling */
258 if (!enable) {
259 reg = reg_readl(priv, REG_LED_CNTRL(0));
260 reg |= SPDLNK_SRC_SEL;
261 reg_writel(priv, reg, REG_LED_CNTRL(0));
262 }
263}
264
236static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, 265static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
237 struct phy_device *phy) 266 struct phy_device *phy)
238{ 267{
@@ -248,6 +277,24 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
248 /* Clear the Rx and Tx disable bits and set to no spanning tree */ 277 /* Clear the Rx and Tx disable bits and set to no spanning tree */
249 core_writel(priv, 0, CORE_G_PCTL_PORT(port)); 278 core_writel(priv, 0, CORE_G_PCTL_PORT(port));
250 279
280 /* Re-enable the GPHY and re-apply workarounds */
281 if (port == 0 && priv->hw_params.num_gphy == 1) {
282 bcm_sf2_gphy_enable_set(ds, true);
283 if (phy) {
284 /* if phy_stop() has been called before, phy
285 * will be in halted state, and phy_start()
286 * will call resume.
287 *
288 * the resume path does not configure back
289 * autoneg settings, and since we hard reset
290 * the phy manually here, we need to reset the
291 * state machine also.
292 */
293 phy->state = PHY_READY;
294 phy_init_hw(phy);
295 }
296 }
297
251 /* Enable port 7 interrupts to get notified */ 298 /* Enable port 7 interrupts to get notified */
252 if (port == 7) 299 if (port == 7)
253 intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF)); 300 intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));
@@ -281,6 +328,9 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
281 intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR); 328 intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR);
282 } 329 }
283 330
331 if (port == 0 && priv->hw_params.num_gphy == 1)
332 bcm_sf2_gphy_enable_set(ds, false);
333
284 if (dsa_is_cpu_port(ds, port)) 334 if (dsa_is_cpu_port(ds, port))
285 off = CORE_IMP_CTL; 335 off = CORE_IMP_CTL;
286 else 336 else
@@ -400,6 +450,16 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
400 return 0; 450 return 0;
401} 451}
402 452
453static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
454{
455 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
456 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
457 intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
458 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
459 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
460 intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
461}
462
403static int bcm_sf2_sw_setup(struct dsa_switch *ds) 463static int bcm_sf2_sw_setup(struct dsa_switch *ds)
404{ 464{
405 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; 465 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
@@ -440,12 +500,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
440 } 500 }
441 501
442 /* Disable all interrupts and request them */ 502 /* Disable all interrupts and request them */
443 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); 503 bcm_sf2_intr_disable(priv);
444 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
445 intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
446 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
447 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
448 intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
449 504
450 ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0, 505 ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
451 "switch_0", priv); 506 "switch_0", priv);
@@ -747,12 +802,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
747 struct bcm_sf2_priv *priv = ds_to_priv(ds); 802 struct bcm_sf2_priv *priv = ds_to_priv(ds);
748 unsigned int port; 803 unsigned int port;
749 804
750 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); 805 bcm_sf2_intr_disable(priv);
751 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
752 intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
753 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
754 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
755 intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
756 806
757 /* Disable all ports physically present including the IMP 807 /* Disable all ports physically present including the IMP
758 * port, the other ones have already been disabled during 808 * port, the other ones have already been disabled during
@@ -771,7 +821,6 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
771{ 821{
772 struct bcm_sf2_priv *priv = ds_to_priv(ds); 822 struct bcm_sf2_priv *priv = ds_to_priv(ds);
773 unsigned int port; 823 unsigned int port;
774 u32 reg;
775 int ret; 824 int ret;
776 825
777 ret = bcm_sf2_sw_rst(priv); 826 ret = bcm_sf2_sw_rst(priv);
@@ -780,17 +829,8 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
780 return ret; 829 return ret;
781 } 830 }
782 831
783 /* Reinitialize the single GPHY */ 832 if (priv->hw_params.num_gphy == 1)
784 if (priv->hw_params.num_gphy == 1) { 833 bcm_sf2_gphy_enable_set(ds, true);
785 reg = reg_readl(priv, REG_SPHY_CNTRL);
786 reg |= PHY_RESET;
787 reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS);
788 reg_writel(priv, reg, REG_SPHY_CNTRL);
789 udelay(21);
790 reg = reg_readl(priv, REG_SPHY_CNTRL);
791 reg &= ~PHY_RESET;
792 reg_writel(priv, reg, REG_SPHY_CNTRL);
793 }
794 834
795 for (port = 0; port < DSA_MAX_PORTS; port++) { 835 for (port = 0; port < DSA_MAX_PORTS; port++) {
796 if ((1 << port) & ds->phys_port_mask) 836 if ((1 << port) & ds->phys_port_mask)
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 1bb49cb699ab..cabdfa5e217a 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -61,6 +61,10 @@
61#define LPI_COUNT_SHIFT 9 61#define LPI_COUNT_SHIFT 9
62#define LPI_COUNT_MASK 0x3F 62#define LPI_COUNT_MASK 0x3F
63 63
64#define REG_LED_CNTRL_BASE 0x90
65#define REG_LED_CNTRL(x) (REG_LED_CNTRL_BASE + (x) * 4)
66#define SPDLNK_SRC_SEL (1 << 24)
67
64/* Register set relative to 'INTRL2_0' and 'INTRL2_1' */ 68/* Register set relative to 'INTRL2_0' and 'INTRL2_1' */
65#define INTRL2_CPU_STATUS 0x00 69#define INTRL2_CPU_STATUS 0x00
66#define INTRL2_CPU_SET 0x04 70#define INTRL2_CPU_SET 0x04
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 1230f52aa70e..2540ef0142af 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -139,7 +139,8 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
139 int nexthop; 139 int nexthop;
140 140
141 nexthop = 0x1f; 141 nexthop = 0x1f;
142 if (i != ds->index && i < ds->dst->pd->nr_chips) 142 if (ds->pd->rtable &&
143 i != ds->index && i < ds->dst->pd->nr_chips)
143 nexthop = ds->pd->rtable[i] & 0x1f; 144 nexthop = ds->pd->rtable[i] & 0x1f;
144 145
145 REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop); 146 REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index 258d9ef5ef25..e13adc7b3dda 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -22,17 +22,14 @@
22#include <net/dsa.h> 22#include <net/dsa.h>
23#include "mv88e6xxx.h" 23#include "mv88e6xxx.h"
24 24
25static int mv88e6352_wait(struct dsa_switch *ds, int reg, u16 mask) 25static int mv88e6352_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
26{ 26{
27 unsigned long timeout = jiffies + HZ / 10; 27 unsigned long timeout = jiffies + HZ / 10;
28 28
29 while (time_before(jiffies, timeout)) { 29 while (time_before(jiffies, timeout)) {
30 int ret; 30 int ret;
31 31
32 ret = REG_READ(REG_GLOBAL2, reg); 32 ret = REG_READ(reg, offset);
33 if (ret < 0)
34 return ret;
35
36 if (!(ret & mask)) 33 if (!(ret & mask))
37 return 0; 34 return 0;
38 35
@@ -43,17 +40,17 @@ static int mv88e6352_wait(struct dsa_switch *ds, int reg, u16 mask)
43 40
44static inline int mv88e6352_phy_wait(struct dsa_switch *ds) 41static inline int mv88e6352_phy_wait(struct dsa_switch *ds)
45{ 42{
46 return mv88e6352_wait(ds, 0x18, 0x8000); 43 return mv88e6352_wait(ds, REG_GLOBAL2, 0x18, 0x8000);
47} 44}
48 45
49static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds) 46static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds)
50{ 47{
51 return mv88e6352_wait(ds, 0x14, 0x0800); 48 return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x0800);
52} 49}
53 50
54static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds) 51static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds)
55{ 52{
56 return mv88e6352_wait(ds, 0x14, 0x8000); 53 return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x8000);
57} 54}
58 55
59static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum) 56static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum)
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index cd6807c6b4ed..3e7e31a6abb7 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -85,6 +85,12 @@ int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
85 ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg); 85 ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
86 mutex_unlock(&ps->smi_mutex); 86 mutex_unlock(&ps->smi_mutex);
87 87
88 if (ret < 0)
89 return ret;
90
91 dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
92 addr, reg, ret);
93
88 return ret; 94 return ret;
89} 95}
90 96
@@ -128,6 +134,9 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
128 if (bus == NULL) 134 if (bus == NULL)
129 return -EINVAL; 135 return -EINVAL;
130 136
137 dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
138 addr, reg, val);
139
131 mutex_lock(&ps->smi_mutex); 140 mutex_lock(&ps->smi_mutex);
132 ret = __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val); 141 ret = __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
133 mutex_unlock(&ps->smi_mutex); 142 mutex_unlock(&ps->smi_mutex);
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index dede43f4ce09..8f8418d2ac4a 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -769,11 +769,11 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
769 first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM; 769 first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
770 } 770 }
771 771
772 if(vlan_tx_tag_present(skb)) { 772 if (skb_vlan_tag_present(skb)) {
773 first_txd->processFlags |= 773 first_txd->processFlags |=
774 TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY; 774 TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
775 first_txd->processFlags |= 775 first_txd->processFlags |=
776 cpu_to_le32(htons(vlan_tx_tag_get(skb)) << 776 cpu_to_le32(htons(skb_vlan_tag_get(skb)) <<
777 TYPHOON_TX_PF_VLAN_TAG_SHIFT); 777 TYPHOON_TX_PF_VLAN_TAG_SHIFT);
778 } 778 }
779 779
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index b68074803de3..b90a26b13fdf 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2429,9 +2429,9 @@ restart:
2429 flagsize = (skb->len << 16) | (BD_FLG_END); 2429 flagsize = (skb->len << 16) | (BD_FLG_END);
2430 if (skb->ip_summed == CHECKSUM_PARTIAL) 2430 if (skb->ip_summed == CHECKSUM_PARTIAL)
2431 flagsize |= BD_FLG_TCP_UDP_SUM; 2431 flagsize |= BD_FLG_TCP_UDP_SUM;
2432 if (vlan_tx_tag_present(skb)) { 2432 if (skb_vlan_tag_present(skb)) {
2433 flagsize |= BD_FLG_VLAN_TAG; 2433 flagsize |= BD_FLG_VLAN_TAG;
2434 vlan_tag = vlan_tx_tag_get(skb); 2434 vlan_tag = skb_vlan_tag_get(skb);
2435 } 2435 }
2436 desc = ap->tx_ring + idx; 2436 desc = ap->tx_ring + idx;
2437 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); 2437 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
@@ -2450,9 +2450,9 @@ restart:
2450 flagsize = (skb_headlen(skb) << 16); 2450 flagsize = (skb_headlen(skb) << 16);
2451 if (skb->ip_summed == CHECKSUM_PARTIAL) 2451 if (skb->ip_summed == CHECKSUM_PARTIAL)
2452 flagsize |= BD_FLG_TCP_UDP_SUM; 2452 flagsize |= BD_FLG_TCP_UDP_SUM;
2453 if (vlan_tx_tag_present(skb)) { 2453 if (skb_vlan_tag_present(skb)) {
2454 flagsize |= BD_FLG_VLAN_TAG; 2454 flagsize |= BD_FLG_VLAN_TAG;
2455 vlan_tag = vlan_tx_tag_get(skb); 2455 vlan_tag = skb_vlan_tag_get(skb);
2456 } 2456 }
2457 2457
2458 ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag); 2458 ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 77f1f6048ddd..c638c85f3954 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -179,7 +179,7 @@ config SUNLANCE
179 179
180config AMD_XGBE 180config AMD_XGBE
181 tristate "AMD 10GbE Ethernet driver" 181 tristate "AMD 10GbE Ethernet driver"
182 depends on OF_NET && HAS_IOMEM 182 depends on (OF_NET || ACPI) && HAS_IOMEM
183 select PHYLIB 183 select PHYLIB
184 select AMD_XGBE_PHY 184 select AMD_XGBE_PHY
185 select BITREVERSE 185 select BITREVERSE
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 841e6558db68..4c2ae2221780 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1299,11 +1299,11 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
1299 lp->tx_ring[tx_index].tx_flags = 0; 1299 lp->tx_ring[tx_index].tx_flags = 0;
1300 1300
1301#if AMD8111E_VLAN_TAG_USED 1301#if AMD8111E_VLAN_TAG_USED
1302 if (vlan_tx_tag_present(skb)) { 1302 if (skb_vlan_tag_present(skb)) {
1303 lp->tx_ring[tx_index].tag_ctrl_cmd |= 1303 lp->tx_ring[tx_index].tag_ctrl_cmd |=
1304 cpu_to_le16(TCC_VLAN_INSERT); 1304 cpu_to_le16(TCC_VLAN_INSERT);
1305 lp->tx_ring[tx_index].tag_ctrl_info = 1305 lp->tx_ring[tx_index].tag_ctrl_info =
1306 cpu_to_le16(vlan_tx_tag_get(skb)); 1306 cpu_to_le16(skb_vlan_tag_get(skb));
1307 1307
1308 } 1308 }
1309#endif 1309#endif
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index e2e3aaf501a2..11d6e6561df1 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -2806,7 +2806,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
2806 2806
2807/* 2807/*
2808 * Check for loss of link and link establishment. 2808 * Check for loss of link and link establishment.
2809 * Can not use mii_check_media because it does nothing if mode is forced. 2809 * Could possibly be changed to use mii_check_media instead.
2810 */ 2810 */
2811 2811
2812static void pcnet32_watchdog(struct net_device *dev) 2812static void pcnet32_watchdog(struct net_device *dev)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 76479d04b903..2c063b60db4b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -328,7 +328,7 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
328 328
329 buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name); 329 buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
330 pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL); 330 pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
331 if (pdata->xgbe_debugfs == NULL) { 331 if (!pdata->xgbe_debugfs) {
332 netdev_err(pdata->netdev, "debugfs_create_dir failed\n"); 332 netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
333 return; 333 return;
334 } 334 }
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index a50891f52197..d81fc6bd4759 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -422,7 +422,6 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
422 422
423 ring->cur = 0; 423 ring->cur = 0;
424 ring->dirty = 0; 424 ring->dirty = 0;
425 memset(&ring->rx, 0, sizeof(ring->rx));
426 425
427 hw_if->rx_desc_init(channel); 426 hw_if->rx_desc_init(channel);
428 } 427 }
@@ -621,35 +620,6 @@ err_out:
621 return 0; 620 return 0;
622} 621}
623 622
624static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
625{
626 struct xgbe_prv_data *pdata = channel->pdata;
627 struct xgbe_hw_if *hw_if = &pdata->hw_if;
628 struct xgbe_ring *ring = channel->rx_ring;
629 struct xgbe_ring_data *rdata;
630 int i;
631
632 DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
633 ring->rx.realloc_index);
634
635 for (i = 0; i < ring->dirty; i++) {
636 rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
637
638 /* Reset rdata values */
639 xgbe_unmap_rdata(pdata, rdata);
640
641 if (xgbe_map_rx_buffer(pdata, ring, rdata))
642 break;
643
644 hw_if->rx_desc_reset(rdata);
645
646 ring->rx.realloc_index++;
647 }
648 ring->dirty = 0;
649
650 DBGPR("<--xgbe_realloc_rx_buffer\n");
651}
652
653void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if) 623void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
654{ 624{
655 DBGPR("-->xgbe_init_function_ptrs_desc\n"); 625 DBGPR("-->xgbe_init_function_ptrs_desc\n");
@@ -657,7 +627,7 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
657 desc_if->alloc_ring_resources = xgbe_alloc_ring_resources; 627 desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
658 desc_if->free_ring_resources = xgbe_free_ring_resources; 628 desc_if->free_ring_resources = xgbe_free_ring_resources;
659 desc_if->map_tx_skb = xgbe_map_tx_skb; 629 desc_if->map_tx_skb = xgbe_map_tx_skb;
660 desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer; 630 desc_if->map_rx_buffer = xgbe_map_rx_buffer;
661 desc_if->unmap_rdata = xgbe_unmap_rdata; 631 desc_if->unmap_rdata = xgbe_unmap_rdata;
662 desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init; 632 desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
663 desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init; 633 desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 4c66cd1d1e60..400757b49872 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -115,6 +115,7 @@
115 */ 115 */
116 116
117#include <linux/phy.h> 117#include <linux/phy.h>
118#include <linux/mdio.h>
118#include <linux/clk.h> 119#include <linux/clk.h>
119#include <linux/bitrev.h> 120#include <linux/bitrev.h>
120#include <linux/crc32.h> 121#include <linux/crc32.h>
@@ -130,7 +131,7 @@ static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
130 131
131 DBGPR("-->xgbe_usec_to_riwt\n"); 132 DBGPR("-->xgbe_usec_to_riwt\n");
132 133
133 rate = clk_get_rate(pdata->sysclk); 134 rate = pdata->sysclk_rate;
134 135
135 /* 136 /*
136 * Convert the input usec value to the watchdog timer value. Each 137 * Convert the input usec value to the watchdog timer value. Each
@@ -153,7 +154,7 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
153 154
154 DBGPR("-->xgbe_riwt_to_usec\n"); 155 DBGPR("-->xgbe_riwt_to_usec\n");
155 156
156 rate = clk_get_rate(pdata->sysclk); 157 rate = pdata->sysclk_rate;
157 158
158 /* 159 /*
159 * Convert the input watchdog timer value to the usec value. Each 160 * Convert the input watchdog timer value to the usec value. Each
@@ -673,6 +674,9 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
673 674
674static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata) 675static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
675{ 676{
677 if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
678 return 0;
679
676 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3); 680 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
677 681
678 return 0; 682 return 0;
@@ -680,6 +684,9 @@ static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
680 684
681static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata) 685static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
682{ 686{
687 if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
688 return 0;
689
683 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2); 690 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
684 691
685 return 0; 692 return 0;
@@ -687,6 +694,9 @@ static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
687 694
688static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata) 695static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
689{ 696{
697 if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
698 return 0;
699
690 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0); 700 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
691 701
692 return 0; 702 return 0;
@@ -881,6 +891,23 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
881 else 891 else
882 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); 892 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
883 893
894 /* If the PCS is changing modes, match the MAC speed to it */
895 if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
896 ((mmd_address & 0xffff) == MDIO_CTRL2)) {
897 struct phy_device *phydev = pdata->phydev;
898
899 if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
900 /* KX mode */
901 if (phydev->supported & SUPPORTED_1000baseKX_Full)
902 xgbe_set_gmii_speed(pdata);
903 else
904 xgbe_set_gmii_2500_speed(pdata);
905 } else {
906 /* KR mode */
907 xgbe_set_xgmii_speed(pdata);
908 }
909 }
910
884 /* The PCS registers are accessed using mmio. The underlying APB3 911 /* The PCS registers are accessed using mmio. The underlying APB3
885 * management interface uses indirect addressing to access the MMD 912 * management interface uses indirect addressing to access the MMD
886 * register sets. This requires accessing of the PCS register in two 913 * register sets. This requires accessing of the PCS register in two
@@ -1359,6 +1386,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
1359 unsigned int tso_context, vlan_context; 1386 unsigned int tso_context, vlan_context;
1360 unsigned int tx_set_ic; 1387 unsigned int tx_set_ic;
1361 int start_index = ring->cur; 1388 int start_index = ring->cur;
1389 int cur_index = ring->cur;
1362 int i; 1390 int i;
1363 1391
1364 DBGPR("-->xgbe_dev_xmit\n"); 1392 DBGPR("-->xgbe_dev_xmit\n");
@@ -1401,7 +1429,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
1401 else 1429 else
1402 tx_set_ic = 0; 1430 tx_set_ic = 0;
1403 1431
1404 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 1432 rdata = XGBE_GET_DESC_DATA(ring, cur_index);
1405 rdesc = rdata->rdesc; 1433 rdesc = rdata->rdesc;
1406 1434
1407 /* Create a context descriptor if this is a TSO packet */ 1435 /* Create a context descriptor if this is a TSO packet */
@@ -1444,8 +1472,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
1444 ring->tx.cur_vlan_ctag = packet->vlan_ctag; 1472 ring->tx.cur_vlan_ctag = packet->vlan_ctag;
1445 } 1473 }
1446 1474
1447 ring->cur++; 1475 cur_index++;
1448 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 1476 rdata = XGBE_GET_DESC_DATA(ring, cur_index);
1449 rdesc = rdata->rdesc; 1477 rdesc = rdata->rdesc;
1450 } 1478 }
1451 1479
@@ -1473,7 +1501,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
1473 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); 1501 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1474 1502
1475 /* Set OWN bit if not the first descriptor */ 1503 /* Set OWN bit if not the first descriptor */
1476 if (ring->cur != start_index) 1504 if (cur_index != start_index)
1477 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); 1505 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1478 1506
1479 if (tso) { 1507 if (tso) {
@@ -1497,9 +1525,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
1497 packet->length); 1525 packet->length);
1498 } 1526 }
1499 1527
1500 for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) { 1528 for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
1501 ring->cur++; 1529 cur_index++;
1502 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 1530 rdata = XGBE_GET_DESC_DATA(ring, cur_index);
1503 rdesc = rdata->rdesc; 1531 rdesc = rdata->rdesc;
1504 1532
1505 /* Update buffer address */ 1533 /* Update buffer address */
@@ -1551,7 +1579,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
1551 /* Make sure ownership is written to the descriptor */ 1579 /* Make sure ownership is written to the descriptor */
1552 wmb(); 1580 wmb();
1553 1581
1554 ring->cur++; 1582 ring->cur = cur_index + 1;
1555 if (!packet->skb->xmit_more || 1583 if (!packet->skb->xmit_more ||
1556 netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev, 1584 netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
1557 channel->queue_index))) 1585 channel->queue_index)))
@@ -2107,6 +2135,23 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
2107 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); 2135 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
2108} 2136}
2109 2137
2138static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
2139{
2140 switch (pdata->phy_speed) {
2141 case SPEED_10000:
2142 xgbe_set_xgmii_speed(pdata);
2143 break;
2144
2145 case SPEED_2500:
2146 xgbe_set_gmii_2500_speed(pdata);
2147 break;
2148
2149 case SPEED_1000:
2150 xgbe_set_gmii_speed(pdata);
2151 break;
2152 }
2153}
2154
2110static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata) 2155static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
2111{ 2156{
2112 if (pdata->netdev->features & NETIF_F_RXCSUM) 2157 if (pdata->netdev->features & NETIF_F_RXCSUM)
@@ -2757,6 +2802,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
2757 xgbe_config_mac_address(pdata); 2802 xgbe_config_mac_address(pdata);
2758 xgbe_config_jumbo_enable(pdata); 2803 xgbe_config_jumbo_enable(pdata);
2759 xgbe_config_flow_control(pdata); 2804 xgbe_config_flow_control(pdata);
2805 xgbe_config_mac_speed(pdata);
2760 xgbe_config_checksum_offload(pdata); 2806 xgbe_config_checksum_offload(pdata);
2761 xgbe_config_vlan_support(pdata); 2807 xgbe_config_vlan_support(pdata);
2762 xgbe_config_mmc(pdata); 2808 xgbe_config_mmc(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index e5ffb2ccb67d..b93d4404d975 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -225,6 +225,11 @@ static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
225 return (ring->rdesc_count - (ring->cur - ring->dirty)); 225 return (ring->rdesc_count - (ring->cur - ring->dirty));
226} 226}
227 227
228static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
229{
230 return (ring->cur - ring->dirty);
231}
232
228static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel, 233static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
229 struct xgbe_ring *ring, unsigned int count) 234 struct xgbe_ring *ring, unsigned int count)
230{ 235{
@@ -337,12 +342,13 @@ static irqreturn_t xgbe_isr(int irq, void *data)
337 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); 342 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
338 DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr); 343 DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
339 344
340 /* If we get a TI or RI interrupt that means per channel DMA 345 /* The TI or RI interrupt bits may still be set even if using
341 * interrupts are not enabled, so we use the private data napi 346 * per channel DMA interrupts. Check to be sure those are not
342 * structure, not the per channel napi structure 347 * enabled before using the private data napi structure.
343 */ 348 */
344 if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) || 349 if (!pdata->per_channel_irq &&
345 XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) { 350 (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
351 XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
346 if (napi_schedule_prep(&pdata->napi)) { 352 if (napi_schedule_prep(&pdata->napi)) {
347 /* Disable Tx and Rx interrupts */ 353 /* Disable Tx and Rx interrupts */
348 xgbe_disable_rx_tx_ints(pdata); 354 xgbe_disable_rx_tx_ints(pdata);
@@ -410,17 +416,13 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
410 struct xgbe_channel *channel = container_of(timer, 416 struct xgbe_channel *channel = container_of(timer,
411 struct xgbe_channel, 417 struct xgbe_channel,
412 tx_timer); 418 tx_timer);
413 struct xgbe_ring *ring = channel->tx_ring;
414 struct xgbe_prv_data *pdata = channel->pdata; 419 struct xgbe_prv_data *pdata = channel->pdata;
415 struct napi_struct *napi; 420 struct napi_struct *napi;
416 unsigned long flags;
417 421
418 DBGPR("-->xgbe_tx_timer\n"); 422 DBGPR("-->xgbe_tx_timer\n");
419 423
420 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; 424 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
421 425
422 spin_lock_irqsave(&ring->lock, flags);
423
424 if (napi_schedule_prep(napi)) { 426 if (napi_schedule_prep(napi)) {
425 /* Disable Tx and Rx interrupts */ 427 /* Disable Tx and Rx interrupts */
426 if (pdata->per_channel_irq) 428 if (pdata->per_channel_irq)
@@ -434,8 +436,6 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
434 436
435 channel->tx_timer_active = 0; 437 channel->tx_timer_active = 0;
436 438
437 spin_unlock_irqrestore(&ring->lock, flags);
438
439 DBGPR("<--xgbe_tx_timer\n"); 439 DBGPR("<--xgbe_tx_timer\n");
440 440
441 return HRTIMER_NORESTART; 441 return HRTIMER_NORESTART;
@@ -694,7 +694,7 @@ static void xgbe_adjust_link(struct net_device *netdev)
694 struct phy_device *phydev = pdata->phydev; 694 struct phy_device *phydev = pdata->phydev;
695 int new_state = 0; 695 int new_state = 0;
696 696
697 if (phydev == NULL) 697 if (!phydev)
698 return; 698 return;
699 699
700 if (phydev->link) { 700 if (phydev->link) {
@@ -929,7 +929,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
929 DBGPR("<--xgbe_stop\n"); 929 DBGPR("<--xgbe_stop\n");
930} 930}
931 931
932static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset) 932static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
933{ 933{
934 struct xgbe_channel *channel; 934 struct xgbe_channel *channel;
935 struct xgbe_hw_if *hw_if = &pdata->hw_if; 935 struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -952,9 +952,8 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
952 xgbe_free_tx_data(pdata); 952 xgbe_free_tx_data(pdata);
953 xgbe_free_rx_data(pdata); 953 xgbe_free_rx_data(pdata);
954 954
955 /* Issue software reset to device if requested */ 955 /* Issue software reset to device */
956 if (reset) 956 hw_if->exit(pdata);
957 hw_if->exit(pdata);
958 957
959 xgbe_start(pdata); 958 xgbe_start(pdata);
960 959
@@ -969,7 +968,7 @@ static void xgbe_restart(struct work_struct *work)
969 968
970 rtnl_lock(); 969 rtnl_lock();
971 970
972 xgbe_restart_dev(pdata, 1); 971 xgbe_restart_dev(pdata);
973 972
974 rtnl_unlock(); 973 rtnl_unlock();
975} 974}
@@ -1167,8 +1166,8 @@ static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
1167 1166
1168static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet) 1167static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
1169{ 1168{
1170 if (vlan_tx_tag_present(skb)) 1169 if (skb_vlan_tag_present(skb))
1171 packet->vlan_ctag = vlan_tx_tag_get(skb); 1170 packet->vlan_ctag = skb_vlan_tag_get(skb);
1172} 1171}
1173 1172
1174static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet) 1173static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
@@ -1249,9 +1248,9 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
1249 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1248 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1250 CSUM_ENABLE, 1); 1249 CSUM_ENABLE, 1);
1251 1250
1252 if (vlan_tx_tag_present(skb)) { 1251 if (skb_vlan_tag_present(skb)) {
1253 /* VLAN requires an extra descriptor if tag is different */ 1252 /* VLAN requires an extra descriptor if tag is different */
1254 if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag) 1253 if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
1255 /* We can share with the TSO context descriptor */ 1254 /* We can share with the TSO context descriptor */
1256 if (!context_desc) { 1255 if (!context_desc) {
1257 context_desc = 1; 1256 context_desc = 1;
@@ -1448,7 +1447,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
1448 struct xgbe_ring *ring; 1447 struct xgbe_ring *ring;
1449 struct xgbe_packet_data *packet; 1448 struct xgbe_packet_data *packet;
1450 struct netdev_queue *txq; 1449 struct netdev_queue *txq;
1451 unsigned long flags;
1452 int ret; 1450 int ret;
1453 1451
1454 DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len); 1452 DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
@@ -1460,8 +1458,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
1460 1458
1461 ret = NETDEV_TX_OK; 1459 ret = NETDEV_TX_OK;
1462 1460
1463 spin_lock_irqsave(&ring->lock, flags);
1464
1465 if (skb->len == 0) { 1461 if (skb->len == 0) {
1466 netdev_err(netdev, "empty skb received from stack\n"); 1462 netdev_err(netdev, "empty skb received from stack\n");
1467 dev_kfree_skb_any(skb); 1463 dev_kfree_skb_any(skb);
@@ -1508,10 +1504,6 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
1508 ret = NETDEV_TX_OK; 1504 ret = NETDEV_TX_OK;
1509 1505
1510tx_netdev_return: 1506tx_netdev_return:
1511 spin_unlock_irqrestore(&ring->lock, flags);
1512
1513 DBGPR("<--xgbe_xmit\n");
1514
1515 return ret; 1507 return ret;
1516} 1508}
1517 1509
@@ -1589,7 +1581,7 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
1589 pdata->rx_buf_size = ret; 1581 pdata->rx_buf_size = ret;
1590 netdev->mtu = mtu; 1582 netdev->mtu = mtu;
1591 1583
1592 xgbe_restart_dev(pdata, 0); 1584 xgbe_restart_dev(pdata);
1593 1585
1594 DBGPR("<--xgbe_change_mtu\n"); 1586 DBGPR("<--xgbe_change_mtu\n");
1595 1587
@@ -1778,15 +1770,28 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
1778static void xgbe_rx_refresh(struct xgbe_channel *channel) 1770static void xgbe_rx_refresh(struct xgbe_channel *channel)
1779{ 1771{
1780 struct xgbe_prv_data *pdata = channel->pdata; 1772 struct xgbe_prv_data *pdata = channel->pdata;
1773 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1781 struct xgbe_desc_if *desc_if = &pdata->desc_if; 1774 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1782 struct xgbe_ring *ring = channel->rx_ring; 1775 struct xgbe_ring *ring = channel->rx_ring;
1783 struct xgbe_ring_data *rdata; 1776 struct xgbe_ring_data *rdata;
1784 1777
1785 desc_if->realloc_rx_buffer(channel); 1778 while (ring->dirty != ring->cur) {
1779 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
1780
1781 /* Reset rdata values */
1782 desc_if->unmap_rdata(pdata, rdata);
1783
1784 if (desc_if->map_rx_buffer(pdata, ring, rdata))
1785 break;
1786
1787 hw_if->rx_desc_reset(rdata);
1788
1789 ring->dirty++;
1790 }
1786 1791
1787 /* Update the Rx Tail Pointer Register with address of 1792 /* Update the Rx Tail Pointer Register with address of
1788 * the last cleaned entry */ 1793 * the last cleaned entry */
1789 rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1); 1794 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
1790 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, 1795 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
1791 lower_32_bits(rdata->rdesc_dma)); 1796 lower_32_bits(rdata->rdesc_dma));
1792} 1797}
@@ -1826,7 +1831,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
1826 struct xgbe_ring_desc *rdesc; 1831 struct xgbe_ring_desc *rdesc;
1827 struct net_device *netdev = pdata->netdev; 1832 struct net_device *netdev = pdata->netdev;
1828 struct netdev_queue *txq; 1833 struct netdev_queue *txq;
1829 unsigned long flags;
1830 int processed = 0; 1834 int processed = 0;
1831 unsigned int tx_packets = 0, tx_bytes = 0; 1835 unsigned int tx_packets = 0, tx_bytes = 0;
1832 1836
@@ -1838,8 +1842,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
1838 1842
1839 txq = netdev_get_tx_queue(netdev, channel->queue_index); 1843 txq = netdev_get_tx_queue(netdev, channel->queue_index);
1840 1844
1841 spin_lock_irqsave(&ring->lock, flags);
1842
1843 while ((processed < XGBE_TX_DESC_MAX_PROC) && 1845 while ((processed < XGBE_TX_DESC_MAX_PROC) &&
1844 (ring->dirty != ring->cur)) { 1846 (ring->dirty != ring->cur)) {
1845 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); 1847 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
@@ -1870,7 +1872,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
1870 } 1872 }
1871 1873
1872 if (!processed) 1874 if (!processed)
1873 goto unlock; 1875 return 0;
1874 1876
1875 netdev_tx_completed_queue(txq, tx_packets, tx_bytes); 1877 netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
1876 1878
@@ -1882,9 +1884,6 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
1882 1884
1883 DBGPR("<--xgbe_tx_poll: processed=%d\n", processed); 1885 DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
1884 1886
1885unlock:
1886 spin_unlock_irqrestore(&ring->lock, flags);
1887
1888 return processed; 1887 return processed;
1889} 1888}
1890 1889
@@ -1936,7 +1935,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
1936read_again: 1935read_again:
1937 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 1936 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1938 1937
1939 if (ring->dirty > (XGBE_RX_DESC_CNT >> 3)) 1938 if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
1940 xgbe_rx_refresh(channel); 1939 xgbe_rx_refresh(channel);
1941 1940
1942 if (hw_if->dev_read(channel)) 1941 if (hw_if->dev_read(channel))
@@ -1944,7 +1943,6 @@ read_again:
1944 1943
1945 received++; 1944 received++;
1946 ring->cur++; 1945 ring->cur++;
1947 ring->dirty++;
1948 1946
1949 incomplete = XGMAC_GET_BITS(packet->attributes, 1947 incomplete = XGMAC_GET_BITS(packet->attributes,
1950 RX_PACKET_ATTRIBUTES, 1948 RX_PACKET_ATTRIBUTES,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index dbd3850b8b0a..32dd65137051 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -123,7 +123,10 @@
123#include <linux/io.h> 123#include <linux/io.h>
124#include <linux/of.h> 124#include <linux/of.h>
125#include <linux/of_net.h> 125#include <linux/of_net.h>
126#include <linux/of_address.h>
126#include <linux/clk.h> 127#include <linux/clk.h>
128#include <linux/property.h>
129#include <linux/acpi.h>
127 130
128#include "xgbe.h" 131#include "xgbe.h"
129#include "xgbe-common.h" 132#include "xgbe-common.h"
@@ -148,6 +151,7 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
148 pdata->pause_autoneg = 1; 151 pdata->pause_autoneg = 1;
149 pdata->tx_pause = 1; 152 pdata->tx_pause = 1;
150 pdata->rx_pause = 1; 153 pdata->rx_pause = 1;
154 pdata->phy_speed = SPEED_UNKNOWN;
151 pdata->power_down = 0; 155 pdata->power_down = 0;
152 pdata->default_autoneg = AUTONEG_ENABLE; 156 pdata->default_autoneg = AUTONEG_ENABLE;
153 pdata->default_speed = SPEED_10000; 157 pdata->default_speed = SPEED_10000;
@@ -161,6 +165,96 @@ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
161 xgbe_init_function_ptrs_desc(&pdata->desc_if); 165 xgbe_init_function_ptrs_desc(&pdata->desc_if);
162} 166}
163 167
168#ifdef CONFIG_ACPI
169static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
170{
171 struct acpi_device *adev = pdata->adev;
172 struct device *dev = pdata->dev;
173 u32 property;
174 acpi_handle handle;
175 acpi_status status;
176 unsigned long long data;
177 int cca;
178 int ret;
179
180 /* Obtain the system clock setting */
181 ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
182 if (ret) {
183 dev_err(dev, "unable to obtain %s property\n",
184 XGBE_ACPI_DMA_FREQ);
185 return ret;
186 }
187 pdata->sysclk_rate = property;
188
189 /* Obtain the PTP clock setting */
190 ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
191 if (ret) {
192 dev_err(dev, "unable to obtain %s property\n",
193 XGBE_ACPI_PTP_FREQ);
194 return ret;
195 }
196 pdata->ptpclk_rate = property;
197
198 /* Retrieve the device cache coherency value */
199 handle = adev->handle;
200 do {
201 status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
202 if (!ACPI_FAILURE(status)) {
203 cca = data;
204 break;
205 }
206
207 status = acpi_get_parent(handle, &handle);
208 } while (!ACPI_FAILURE(status));
209
210 if (ACPI_FAILURE(status)) {
211 dev_err(dev, "error obtaining acpi coherency value\n");
212 return -EINVAL;
213 }
214 pdata->coherent = !!cca;
215
216 return 0;
217}
218#else /* CONFIG_ACPI */
219static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
220{
221 return -EINVAL;
222}
223#endif /* CONFIG_ACPI */
224
225#ifdef CONFIG_OF
226static int xgbe_of_support(struct xgbe_prv_data *pdata)
227{
228 struct device *dev = pdata->dev;
229
230 /* Obtain the system clock setting */
231 pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
232 if (IS_ERR(pdata->sysclk)) {
233 dev_err(dev, "dma devm_clk_get failed\n");
234 return PTR_ERR(pdata->sysclk);
235 }
236 pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
237
238 /* Obtain the PTP clock setting */
239 pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
240 if (IS_ERR(pdata->ptpclk)) {
241 dev_err(dev, "ptp devm_clk_get failed\n");
242 return PTR_ERR(pdata->ptpclk);
243 }
244 pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
245
246 /* Retrieve the device cache coherency value */
247 pdata->coherent = of_dma_is_coherent(dev->of_node);
248
249 return 0;
250}
251#else /* CONFIG_OF */
252static int xgbe_of_support(struct xgbe_prv_data *pdata)
253{
254 return -EINVAL;
255}
256#endif /*CONFIG_OF */
257
164static int xgbe_probe(struct platform_device *pdev) 258static int xgbe_probe(struct platform_device *pdev)
165{ 259{
166 struct xgbe_prv_data *pdata; 260 struct xgbe_prv_data *pdata;
@@ -169,7 +263,7 @@ static int xgbe_probe(struct platform_device *pdev)
169 struct net_device *netdev; 263 struct net_device *netdev;
170 struct device *dev = &pdev->dev; 264 struct device *dev = &pdev->dev;
171 struct resource *res; 265 struct resource *res;
172 const u8 *mac_addr; 266 const char *phy_mode;
173 unsigned int i; 267 unsigned int i;
174 int ret; 268 int ret;
175 269
@@ -186,6 +280,7 @@ static int xgbe_probe(struct platform_device *pdev)
186 pdata = netdev_priv(netdev); 280 pdata = netdev_priv(netdev);
187 pdata->netdev = netdev; 281 pdata->netdev = netdev;
188 pdata->pdev = pdev; 282 pdata->pdev = pdev;
283 pdata->adev = ACPI_COMPANION(dev);
189 pdata->dev = dev; 284 pdata->dev = dev;
190 platform_set_drvdata(pdev, netdev); 285 platform_set_drvdata(pdev, netdev);
191 286
@@ -194,6 +289,9 @@ static int xgbe_probe(struct platform_device *pdev)
194 mutex_init(&pdata->rss_mutex); 289 mutex_init(&pdata->rss_mutex);
195 spin_lock_init(&pdata->tstamp_lock); 290 spin_lock_init(&pdata->tstamp_lock);
196 291
292 /* Check if we should use ACPI or DT */
293 pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
294
197 /* Set and validate the number of descriptors for a ring */ 295 /* Set and validate the number of descriptors for a ring */
198 BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT); 296 BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
199 pdata->tx_desc_count = XGBE_TX_DESC_CNT; 297 pdata->tx_desc_count = XGBE_TX_DESC_CNT;
@@ -212,22 +310,6 @@ static int xgbe_probe(struct platform_device *pdev)
212 goto err_io; 310 goto err_io;
213 } 311 }
214 312
215 /* Obtain the system clock setting */
216 pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
217 if (IS_ERR(pdata->sysclk)) {
218 dev_err(dev, "dma devm_clk_get failed\n");
219 ret = PTR_ERR(pdata->sysclk);
220 goto err_io;
221 }
222
223 /* Obtain the PTP clock setting */
224 pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
225 if (IS_ERR(pdata->ptpclk)) {
226 dev_err(dev, "ptp devm_clk_get failed\n");
227 ret = PTR_ERR(pdata->ptpclk);
228 goto err_io;
229 }
230
231 /* Obtain the mmio areas for the device */ 313 /* Obtain the mmio areas for the device */
232 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 314 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
233 pdata->xgmac_regs = devm_ioremap_resource(dev, res); 315 pdata->xgmac_regs = devm_ioremap_resource(dev, res);
@@ -247,16 +329,42 @@ static int xgbe_probe(struct platform_device *pdev)
247 } 329 }
248 DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs); 330 DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
249 331
250 /* Set the DMA mask */ 332 /* Retrieve the MAC address */
251 if (!dev->dma_mask) 333 ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
252 dev->dma_mask = &dev->coherent_dma_mask; 334 pdata->mac_addr,
253 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); 335 sizeof(pdata->mac_addr));
254 if (ret) { 336 if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
255 dev_err(dev, "dma_set_mask_and_coherent failed\n"); 337 dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
338 if (!ret)
339 ret = -EINVAL;
256 goto err_io; 340 goto err_io;
257 } 341 }
258 342
259 if (of_property_read_bool(dev->of_node, "dma-coherent")) { 343 /* Retrieve the PHY mode - it must be "xgmii" */
344 ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
345 &phy_mode);
346 if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
347 dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
348 if (!ret)
349 ret = -EINVAL;
350 goto err_io;
351 }
352 pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
353
354 /* Check for per channel interrupt support */
355 if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
356 pdata->per_channel_irq = 1;
357
358 /* Obtain device settings unique to ACPI/OF */
359 if (pdata->use_acpi)
360 ret = xgbe_acpi_support(pdata);
361 else
362 ret = xgbe_of_support(pdata);
363 if (ret)
364 goto err_io;
365
366 /* Set the DMA coherency values */
367 if (pdata->coherent) {
260 pdata->axdomain = XGBE_DMA_OS_AXDOMAIN; 368 pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
261 pdata->arcache = XGBE_DMA_OS_ARCACHE; 369 pdata->arcache = XGBE_DMA_OS_ARCACHE;
262 pdata->awcache = XGBE_DMA_OS_AWCACHE; 370 pdata->awcache = XGBE_DMA_OS_AWCACHE;
@@ -266,10 +374,16 @@ static int xgbe_probe(struct platform_device *pdev)
266 pdata->awcache = XGBE_DMA_SYS_AWCACHE; 374 pdata->awcache = XGBE_DMA_SYS_AWCACHE;
267 } 375 }
268 376
269 /* Check for per channel interrupt support */ 377 /* Set the DMA mask */
270 if (of_property_read_bool(dev->of_node, XGBE_DMA_IRQS)) 378 if (!dev->dma_mask)
271 pdata->per_channel_irq = 1; 379 dev->dma_mask = &dev->coherent_dma_mask;
380 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
381 if (ret) {
382 dev_err(dev, "dma_set_mask_and_coherent failed\n");
383 goto err_io;
384 }
272 385
386 /* Get the device interrupt */
273 ret = platform_get_irq(pdev, 0); 387 ret = platform_get_irq(pdev, 0);
274 if (ret < 0) { 388 if (ret < 0) {
275 dev_err(dev, "platform_get_irq 0 failed\n"); 389 dev_err(dev, "platform_get_irq 0 failed\n");
@@ -279,6 +393,7 @@ static int xgbe_probe(struct platform_device *pdev)
279 393
280 netdev->irq = pdata->dev_irq; 394 netdev->irq = pdata->dev_irq;
281 netdev->base_addr = (unsigned long)pdata->xgmac_regs; 395 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
396 memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
282 397
283 /* Set all the function pointers */ 398 /* Set all the function pointers */
284 xgbe_init_all_fptrs(pdata); 399 xgbe_init_all_fptrs(pdata);
@@ -291,23 +406,6 @@ static int xgbe_probe(struct platform_device *pdev)
291 /* Populate the hardware features */ 406 /* Populate the hardware features */
292 xgbe_get_all_hw_features(pdata); 407 xgbe_get_all_hw_features(pdata);
293 408
294 /* Retrieve the MAC address */
295 mac_addr = of_get_mac_address(dev->of_node);
296 if (!mac_addr) {
297 dev_err(dev, "invalid mac address for this device\n");
298 ret = -EINVAL;
299 goto err_io;
300 }
301 memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
302
303 /* Retrieve the PHY mode - it must be "xgmii" */
304 pdata->phy_mode = of_get_phy_mode(dev->of_node);
305 if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
306 dev_err(dev, "invalid phy-mode specified for this device\n");
307 ret = -EINVAL;
308 goto err_io;
309 }
310
311 /* Set default configuration data */ 409 /* Set default configuration data */
312 xgbe_default_config(pdata); 410 xgbe_default_config(pdata);
313 411
@@ -491,18 +589,35 @@ static int xgbe_resume(struct device *dev)
491} 589}
492#endif /* CONFIG_PM */ 590#endif /* CONFIG_PM */
493 591
592#ifdef CONFIG_ACPI
593static const struct acpi_device_id xgbe_acpi_match[] = {
594 { "AMDI8001", 0 },
595 {},
596};
597
598MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
599#endif
600
601#ifdef CONFIG_OF
494static const struct of_device_id xgbe_of_match[] = { 602static const struct of_device_id xgbe_of_match[] = {
495 { .compatible = "amd,xgbe-seattle-v1a", }, 603 { .compatible = "amd,xgbe-seattle-v1a", },
496 {}, 604 {},
497}; 605};
498 606
499MODULE_DEVICE_TABLE(of, xgbe_of_match); 607MODULE_DEVICE_TABLE(of, xgbe_of_match);
608#endif
609
500static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume); 610static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
501 611
502static struct platform_driver xgbe_driver = { 612static struct platform_driver xgbe_driver = {
503 .driver = { 613 .driver = {
504 .name = "amd-xgbe", 614 .name = "amd-xgbe",
615#ifdef CONFIG_ACPI
616 .acpi_match_table = xgbe_acpi_match,
617#endif
618#ifdef CONFIG_OF
505 .of_match_table = xgbe_of_match, 619 .of_match_table = xgbe_of_match,
620#endif
506 .pm = &xgbe_pm_ops, 621 .pm = &xgbe_pm_ops,
507 }, 622 },
508 .probe = xgbe_probe, 623 .probe = xgbe_probe,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 363b210560f3..59e267f3f1b7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -205,25 +205,16 @@ void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
205 205
206int xgbe_mdio_register(struct xgbe_prv_data *pdata) 206int xgbe_mdio_register(struct xgbe_prv_data *pdata)
207{ 207{
208 struct device_node *phy_node;
209 struct mii_bus *mii; 208 struct mii_bus *mii;
210 struct phy_device *phydev; 209 struct phy_device *phydev;
211 int ret = 0; 210 int ret = 0;
212 211
213 DBGPR("-->xgbe_mdio_register\n"); 212 DBGPR("-->xgbe_mdio_register\n");
214 213
215 /* Retrieve the phy-handle */
216 phy_node = of_parse_phandle(pdata->dev->of_node, "phy-handle", 0);
217 if (!phy_node) {
218 dev_err(pdata->dev, "unable to parse phy-handle\n");
219 return -EINVAL;
220 }
221
222 mii = mdiobus_alloc(); 214 mii = mdiobus_alloc();
223 if (mii == NULL) { 215 if (!mii) {
224 dev_err(pdata->dev, "mdiobus_alloc failed\n"); 216 dev_err(pdata->dev, "mdiobus_alloc failed\n");
225 ret = -ENOMEM; 217 return -ENOMEM;
226 goto err_node_get;
227 } 218 }
228 219
229 /* Register on the MDIO bus (don't probe any PHYs) */ 220 /* Register on the MDIO bus (don't probe any PHYs) */
@@ -252,18 +243,19 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
252 request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, 243 request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
253 MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS])); 244 MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
254 245
255 of_node_get(phy_node);
256 phydev->dev.of_node = phy_node;
257 ret = phy_device_register(phydev); 246 ret = phy_device_register(phydev);
258 if (ret) { 247 if (ret) {
259 dev_err(pdata->dev, "phy_device_register failed\n"); 248 dev_err(pdata->dev, "phy_device_register failed\n");
260 of_node_put(phy_node); 249 goto err_phy_device;
250 }
251 if (!phydev->dev.driver) {
252 dev_err(pdata->dev, "phy driver probe failed\n");
253 ret = -EIO;
261 goto err_phy_device; 254 goto err_phy_device;
262 } 255 }
263 256
264 /* Add a reference to the PHY driver so it can't be unloaded */ 257 /* Add a reference to the PHY driver so it can't be unloaded */
265 pdata->phy_module = phydev->dev.driver ? 258 pdata->phy_module = phydev->dev.driver->owner;
266 phydev->dev.driver->owner : NULL;
267 if (!try_module_get(pdata->phy_module)) { 259 if (!try_module_get(pdata->phy_module)) {
268 dev_err(pdata->dev, "try_module_get failed\n"); 260 dev_err(pdata->dev, "try_module_get failed\n");
269 ret = -EIO; 261 ret = -EIO;
@@ -283,8 +275,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
283 275
284 pdata->phydev = phydev; 276 pdata->phydev = phydev;
285 277
286 of_node_put(phy_node);
287
288 DBGPHY_REGS(pdata); 278 DBGPHY_REGS(pdata);
289 279
290 DBGPR("<--xgbe_mdio_register\n"); 280 DBGPR("<--xgbe_mdio_register\n");
@@ -300,9 +290,6 @@ err_mdiobus_register:
300err_mdiobus_alloc: 290err_mdiobus_alloc:
301 mdiobus_free(mii); 291 mdiobus_free(mii);
302 292
303err_node_get:
304 of_node_put(phy_node);
305
306 return ret; 293 return ret;
307} 294}
308 295
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index a1bf9d1cdae1..f326178ef376 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -171,15 +171,9 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
171 struct xgbe_prv_data, 171 struct xgbe_prv_data,
172 ptp_clock_info); 172 ptp_clock_info);
173 unsigned long flags; 173 unsigned long flags;
174 u64 nsec;
175 174
176 spin_lock_irqsave(&pdata->tstamp_lock, flags); 175 spin_lock_irqsave(&pdata->tstamp_lock, flags);
177 176 timecounter_adjtime(&pdata->tstamp_tc, delta);
178 nsec = timecounter_read(&pdata->tstamp_tc);
179
180 nsec += delta;
181 timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
182
183 spin_unlock_irqrestore(&pdata->tstamp_lock, flags); 177 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
184 178
185 return 0; 179 return 0;
@@ -239,7 +233,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
239 snprintf(info->name, sizeof(info->name), "%s", 233 snprintf(info->name, sizeof(info->name), "%s",
240 netdev_name(pdata->netdev)); 234 netdev_name(pdata->netdev));
241 info->owner = THIS_MODULE; 235 info->owner = THIS_MODULE;
242 info->max_adj = clk_get_rate(pdata->ptpclk); 236 info->max_adj = pdata->ptpclk_rate;
243 info->adjfreq = xgbe_adjfreq; 237 info->adjfreq = xgbe_adjfreq;
244 info->adjtime = xgbe_adjtime; 238 info->adjtime = xgbe_adjtime;
245 info->gettime = xgbe_gettime; 239 info->gettime = xgbe_gettime;
@@ -260,7 +254,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
260 */ 254 */
261 dividend = 50000000; 255 dividend = 50000000;
262 dividend <<= 32; 256 dividend <<= 32;
263 pdata->tstamp_addend = div_u64(dividend, clk_get_rate(pdata->ptpclk)); 257 pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
264 258
265 /* Setup the timecounter */ 259 /* Setup the timecounter */
266 cc->read = xgbe_cc_read; 260 cc->read = xgbe_cc_read;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f9ec762ac3f0..13e8f95c077c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -124,7 +124,7 @@
124#include <linux/if_vlan.h> 124#include <linux/if_vlan.h>
125#include <linux/bitops.h> 125#include <linux/bitops.h>
126#include <linux/ptp_clock_kernel.h> 126#include <linux/ptp_clock_kernel.h>
127#include <linux/clocksource.h> 127#include <linux/timecounter.h>
128#include <linux/net_tstamp.h> 128#include <linux/net_tstamp.h>
129#include <net/dcbnl.h> 129#include <net/dcbnl.h>
130 130
@@ -182,10 +182,18 @@
182#define XGBE_PHY_NAME "amd_xgbe_phy" 182#define XGBE_PHY_NAME "amd_xgbe_phy"
183#define XGBE_PRTAD 0 183#define XGBE_PRTAD 0
184 184
185/* Common property names */
186#define XGBE_MAC_ADDR_PROPERTY "mac-address"
187#define XGBE_PHY_MODE_PROPERTY "phy-mode"
188#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
189
185/* Device-tree clock names */ 190/* Device-tree clock names */
186#define XGBE_DMA_CLOCK "dma_clk" 191#define XGBE_DMA_CLOCK "dma_clk"
187#define XGBE_PTP_CLOCK "ptp_clk" 192#define XGBE_PTP_CLOCK "ptp_clk"
188#define XGBE_DMA_IRQS "amd,per-channel-interrupt" 193
194/* ACPI property names */
195#define XGBE_ACPI_DMA_FREQ "amd,dma-freq"
196#define XGBE_ACPI_PTP_FREQ "amd,ptp-freq"
189 197
190/* Timestamp support - values based on 50MHz PTP clock 198/* Timestamp support - values based on 50MHz PTP clock
191 * 50MHz => 20 nsec 199 * 50MHz => 20 nsec
@@ -361,8 +369,7 @@ struct xgbe_ring {
361 * cur - Tx: index of descriptor to be used for current transfer 369 * cur - Tx: index of descriptor to be used for current transfer
362 * Rx: index of descriptor to check for packet availability 370 * Rx: index of descriptor to check for packet availability
363 * dirty - Tx: index of descriptor to check for transfer complete 371 * dirty - Tx: index of descriptor to check for transfer complete
364 * Rx: count of descriptors in which a packet has been received 372 * Rx: index of descriptor to check for buffer reallocation
365 * (used with skb_realloc_index to refresh the ring)
366 */ 373 */
367 unsigned int cur; 374 unsigned int cur;
368 unsigned int dirty; 375 unsigned int dirty;
@@ -377,11 +384,6 @@ struct xgbe_ring {
377 unsigned short cur_mss; 384 unsigned short cur_mss;
378 unsigned short cur_vlan_ctag; 385 unsigned short cur_vlan_ctag;
379 } tx; 386 } tx;
380
381 struct {
382 unsigned int realloc_index;
383 unsigned int realloc_threshold;
384 } rx;
385 }; 387 };
386} ____cacheline_aligned; 388} ____cacheline_aligned;
387 389
@@ -596,7 +598,8 @@ struct xgbe_desc_if {
596 int (*alloc_ring_resources)(struct xgbe_prv_data *); 598 int (*alloc_ring_resources)(struct xgbe_prv_data *);
597 void (*free_ring_resources)(struct xgbe_prv_data *); 599 void (*free_ring_resources)(struct xgbe_prv_data *);
598 int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *); 600 int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
599 void (*realloc_rx_buffer)(struct xgbe_channel *); 601 int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *,
602 struct xgbe_ring_data *);
600 void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *); 603 void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
601 void (*wrapper_tx_desc_init)(struct xgbe_prv_data *); 604 void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
602 void (*wrapper_rx_desc_init)(struct xgbe_prv_data *); 605 void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
@@ -650,8 +653,12 @@ struct xgbe_hw_features {
650struct xgbe_prv_data { 653struct xgbe_prv_data {
651 struct net_device *netdev; 654 struct net_device *netdev;
652 struct platform_device *pdev; 655 struct platform_device *pdev;
656 struct acpi_device *adev;
653 struct device *dev; 657 struct device *dev;
654 658
659 /* ACPI or DT flag */
660 unsigned int use_acpi;
661
655 /* XGMAC/XPCS related mmio registers */ 662 /* XGMAC/XPCS related mmio registers */
656 void __iomem *xgmac_regs; /* XGMAC CSRs */ 663 void __iomem *xgmac_regs; /* XGMAC CSRs */
657 void __iomem *xpcs_regs; /* XPCS MMD registers */ 664 void __iomem *xpcs_regs; /* XPCS MMD registers */
@@ -672,6 +679,7 @@ struct xgbe_prv_data {
672 struct xgbe_desc_if desc_if; 679 struct xgbe_desc_if desc_if;
673 680
674 /* AXI DMA settings */ 681 /* AXI DMA settings */
682 unsigned int coherent;
675 unsigned int axdomain; 683 unsigned int axdomain;
676 unsigned int arcache; 684 unsigned int arcache;
677 unsigned int awcache; 685 unsigned int awcache;
@@ -739,6 +747,7 @@ struct xgbe_prv_data {
739 unsigned int phy_rx_pause; 747 unsigned int phy_rx_pause;
740 748
741 /* Netdev related settings */ 749 /* Netdev related settings */
750 unsigned char mac_addr[ETH_ALEN];
742 netdev_features_t netdev_features; 751 netdev_features_t netdev_features;
743 struct napi_struct napi; 752 struct napi_struct napi;
744 struct xgbe_mmc_stats mmc_stats; 753 struct xgbe_mmc_stats mmc_stats;
@@ -748,7 +757,9 @@ struct xgbe_prv_data {
748 757
749 /* Device clocks */ 758 /* Device clocks */
750 struct clk *sysclk; 759 struct clk *sysclk;
760 unsigned long sysclk_rate;
751 struct clk *ptpclk; 761 struct clk *ptpclk;
762 unsigned long ptpclk_rate;
752 763
753 /* Timestamp support */ 764 /* Timestamp support */
754 spinlock_t tstamp_lock; 765 spinlock_t tstamp_lock;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 7ba83ffb08ac..869d97fcf781 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -593,10 +593,12 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
593 if (!xgene_ring_mgr_init(pdata)) 593 if (!xgene_ring_mgr_init(pdata))
594 return -ENODEV; 594 return -ENODEV;
595 595
596 clk_prepare_enable(pdata->clk); 596 if (!efi_enabled(EFI_BOOT)) {
597 clk_disable_unprepare(pdata->clk); 597 clk_prepare_enable(pdata->clk);
598 clk_prepare_enable(pdata->clk); 598 clk_disable_unprepare(pdata->clk);
599 xgene_enet_ecc_init(pdata); 599 clk_prepare_enable(pdata->clk);
600 xgene_enet_ecc_init(pdata);
601 }
600 xgene_enet_config_ring_if_assoc(pdata); 602 xgene_enet_config_ring_if_assoc(pdata);
601 603
602 /* Enable auto-incr for scanning */ 604 /* Enable auto-incr for scanning */
@@ -663,15 +665,20 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
663 struct phy_device *phy_dev; 665 struct phy_device *phy_dev;
664 struct device *dev = &pdata->pdev->dev; 666 struct device *dev = &pdata->pdev->dev;
665 667
666 phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0); 668 if (dev->of_node) {
667 if (!phy_np) { 669 phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
668 netdev_dbg(ndev, "No phy-handle found\n"); 670 if (!phy_np) {
669 return -ENODEV; 671 netdev_dbg(ndev, "No phy-handle found in DT\n");
672 return -ENODEV;
673 }
674 pdata->phy_dev = of_phy_find_device(phy_np);
670 } 675 }
671 676
672 phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link, 677 phy_dev = pdata->phy_dev;
673 0, pdata->phy_mode); 678
674 if (!phy_dev) { 679 if (!phy_dev ||
680 phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
681 pdata->phy_mode)) {
675 netdev_err(ndev, "Could not connect to PHY\n"); 682 netdev_err(ndev, "Could not connect to PHY\n");
676 return -ENODEV; 683 return -ENODEV;
677 } 684 }
@@ -681,32 +688,71 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
681 ~SUPPORTED_100baseT_Half & 688 ~SUPPORTED_100baseT_Half &
682 ~SUPPORTED_1000baseT_Half; 689 ~SUPPORTED_1000baseT_Half;
683 phy_dev->advertising = phy_dev->supported; 690 phy_dev->advertising = phy_dev->supported;
684 pdata->phy_dev = phy_dev;
685 691
686 return 0; 692 return 0;
687} 693}
688 694
689int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata) 695static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
696 struct mii_bus *mdio)
690{ 697{
691 struct net_device *ndev = pdata->ndev;
692 struct device *dev = &pdata->pdev->dev; 698 struct device *dev = &pdata->pdev->dev;
699 struct net_device *ndev = pdata->ndev;
700 struct phy_device *phy;
693 struct device_node *child_np; 701 struct device_node *child_np;
694 struct device_node *mdio_np = NULL; 702 struct device_node *mdio_np = NULL;
695 struct mii_bus *mdio_bus;
696 int ret; 703 int ret;
704 u32 phy_id;
705
706 if (dev->of_node) {
707 for_each_child_of_node(dev->of_node, child_np) {
708 if (of_device_is_compatible(child_np,
709 "apm,xgene-mdio")) {
710 mdio_np = child_np;
711 break;
712 }
713 }
697 714
698 for_each_child_of_node(dev->of_node, child_np) { 715 if (!mdio_np) {
699 if (of_device_is_compatible(child_np, "apm,xgene-mdio")) { 716 netdev_dbg(ndev, "No mdio node in the dts\n");
700 mdio_np = child_np; 717 return -ENXIO;
701 break;
702 } 718 }
703 }
704 719
705 if (!mdio_np) { 720 return of_mdiobus_register(mdio, mdio_np);
706 netdev_dbg(ndev, "No mdio node in the dts\n");
707 return -ENXIO;
708 } 721 }
709 722
723 /* Mask out all PHYs from auto probing. */
724 mdio->phy_mask = ~0;
725
726 /* Register the MDIO bus */
727 ret = mdiobus_register(mdio);
728 if (ret)
729 return ret;
730
731 ret = device_property_read_u32(dev, "phy-channel", &phy_id);
732 if (ret)
733 ret = device_property_read_u32(dev, "phy-addr", &phy_id);
734 if (ret)
735 return -EINVAL;
736
737 phy = get_phy_device(mdio, phy_id, true);
738 if (!phy || IS_ERR(phy))
739 return -EIO;
740
741 ret = phy_device_register(phy);
742 if (ret)
743 phy_device_free(phy);
744 else
745 pdata->phy_dev = phy;
746
747 return ret;
748}
749
750int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
751{
752 struct net_device *ndev = pdata->ndev;
753 struct mii_bus *mdio_bus;
754 int ret;
755
710 mdio_bus = mdiobus_alloc(); 756 mdio_bus = mdiobus_alloc();
711 if (!mdio_bus) 757 if (!mdio_bus)
712 return -ENOMEM; 758 return -ENOMEM;
@@ -720,7 +766,7 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
720 mdio_bus->priv = pdata; 766 mdio_bus->priv = pdata;
721 mdio_bus->parent = &ndev->dev; 767 mdio_bus->parent = &ndev->dev;
722 768
723 ret = of_mdiobus_register(mdio_bus, mdio_np); 769 ret = xgene_mdiobus_register(pdata, mdio_bus);
724 if (ret) { 770 if (ret) {
725 netdev_err(ndev, "Failed to register MDIO bus\n"); 771 netdev_err(ndev, "Failed to register MDIO bus\n");
726 mdiobus_free(mdio_bus); 772 mdiobus_free(mdio_bus);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 793f3b73eeff..44b15373d6b3 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -24,6 +24,10 @@
24#include "xgene_enet_sgmac.h" 24#include "xgene_enet_sgmac.h"
25#include "xgene_enet_xgmac.h" 25#include "xgene_enet_xgmac.h"
26 26
27#define RES_ENET_CSR 0
28#define RES_RING_CSR 1
29#define RES_RING_CMD 2
30
27static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) 31static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
28{ 32{
29 struct xgene_enet_raw_desc16 *raw_desc; 33 struct xgene_enet_raw_desc16 *raw_desc;
@@ -748,6 +752,41 @@ static const struct net_device_ops xgene_ndev_ops = {
748 .ndo_set_mac_address = xgene_enet_set_mac_address, 752 .ndo_set_mac_address = xgene_enet_set_mac_address,
749}; 753};
750 754
755static int xgene_get_mac_address(struct device *dev,
756 unsigned char *addr)
757{
758 int ret;
759
760 ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
761 if (ret)
762 ret = device_property_read_u8_array(dev, "mac-address",
763 addr, 6);
764 if (ret)
765 return -ENODEV;
766
767 return ETH_ALEN;
768}
769
770static int xgene_get_phy_mode(struct device *dev)
771{
772 int i, ret;
773 char *modestr;
774
775 ret = device_property_read_string(dev, "phy-connection-type",
776 (const char **)&modestr);
777 if (ret)
778 ret = device_property_read_string(dev, "phy-mode",
779 (const char **)&modestr);
780 if (ret)
781 return -ENODEV;
782
783 for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
784 if (!strcasecmp(modestr, phy_modes(i)))
785 return i;
786 }
787 return -ENODEV;
788}
789
751static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) 790static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
752{ 791{
753 struct platform_device *pdev; 792 struct platform_device *pdev;
@@ -755,32 +794,45 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
755 struct device *dev; 794 struct device *dev;
756 struct resource *res; 795 struct resource *res;
757 void __iomem *base_addr; 796 void __iomem *base_addr;
758 const char *mac;
759 int ret; 797 int ret;
760 798
761 pdev = pdata->pdev; 799 pdev = pdata->pdev;
762 dev = &pdev->dev; 800 dev = &pdev->dev;
763 ndev = pdata->ndev; 801 ndev = pdata->ndev;
764 802
765 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr"); 803 res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
766 pdata->base_addr = devm_ioremap_resource(dev, res); 804 if (!res) {
767 if (IS_ERR(pdata->base_addr)) { 805 dev_err(dev, "Resource enet_csr not defined\n");
806 return -ENODEV;
807 }
808 pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
809 if (!pdata->base_addr) {
768 dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); 810 dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
769 return PTR_ERR(pdata->base_addr); 811 return -ENOMEM;
770 } 812 }
771 813
772 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr"); 814 res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
773 pdata->ring_csr_addr = devm_ioremap_resource(dev, res); 815 if (!res) {
774 if (IS_ERR(pdata->ring_csr_addr)) { 816 dev_err(dev, "Resource ring_csr not defined\n");
817 return -ENODEV;
818 }
819 pdata->ring_csr_addr = devm_ioremap(dev, res->start,
820 resource_size(res));
821 if (!pdata->ring_csr_addr) {
775 dev_err(dev, "Unable to retrieve ENET Ring CSR region\n"); 822 dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
776 return PTR_ERR(pdata->ring_csr_addr); 823 return -ENOMEM;
777 } 824 }
778 825
779 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd"); 826 res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
780 pdata->ring_cmd_addr = devm_ioremap_resource(dev, res); 827 if (!res) {
781 if (IS_ERR(pdata->ring_cmd_addr)) { 828 dev_err(dev, "Resource ring_cmd not defined\n");
829 return -ENODEV;
830 }
831 pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
832 resource_size(res));
833 if (!pdata->ring_cmd_addr) {
782 dev_err(dev, "Unable to retrieve ENET Ring command region\n"); 834 dev_err(dev, "Unable to retrieve ENET Ring command region\n");
783 return PTR_ERR(pdata->ring_cmd_addr); 835 return -ENOMEM;
784 } 836 }
785 837
786 ret = platform_get_irq(pdev, 0); 838 ret = platform_get_irq(pdev, 0);
@@ -791,14 +843,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
791 } 843 }
792 pdata->rx_irq = ret; 844 pdata->rx_irq = ret;
793 845
794 mac = of_get_mac_address(dev->of_node); 846 if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
795 if (mac)
796 memcpy(ndev->dev_addr, mac, ndev->addr_len);
797 else
798 eth_hw_addr_random(ndev); 847 eth_hw_addr_random(ndev);
848
799 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); 849 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
800 850
801 pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node); 851 pdata->phy_mode = xgene_get_phy_mode(dev);
802 if (pdata->phy_mode < 0) { 852 if (pdata->phy_mode < 0) {
803 dev_err(dev, "Unable to get phy-connection-type\n"); 853 dev_err(dev, "Unable to get phy-connection-type\n");
804 return pdata->phy_mode; 854 return pdata->phy_mode;
@@ -811,11 +861,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
811 } 861 }
812 862
813 pdata->clk = devm_clk_get(&pdev->dev, NULL); 863 pdata->clk = devm_clk_get(&pdev->dev, NULL);
814 ret = IS_ERR(pdata->clk);
815 if (IS_ERR(pdata->clk)) { 864 if (IS_ERR(pdata->clk)) {
816 dev_err(&pdev->dev, "can't get clock\n"); 865 /* Firmware may have set up the clock already. */
817 ret = PTR_ERR(pdata->clk); 866 pdata->clk = NULL;
818 return ret;
819 } 867 }
820 868
821 base_addr = pdata->base_addr; 869 base_addr = pdata->base_addr;
@@ -926,7 +974,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
926 goto err; 974 goto err;
927 } 975 }
928 976
929 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 977 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
930 if (ret) { 978 if (ret) {
931 netdev_err(ndev, "No usable DMA configuration\n"); 979 netdev_err(ndev, "No usable DMA configuration\n");
932 goto err; 980 goto err;
@@ -974,17 +1022,26 @@ static int xgene_enet_remove(struct platform_device *pdev)
974 return 0; 1022 return 0;
975} 1023}
976 1024
977static struct of_device_id xgene_enet_match[] = { 1025#ifdef CONFIG_ACPI
1026static const struct acpi_device_id xgene_enet_acpi_match[] = {
1027 { "APMC0D05", },
1028 { }
1029};
1030MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
1031#endif
1032
1033static struct of_device_id xgene_enet_of_match[] = {
978 {.compatible = "apm,xgene-enet",}, 1034 {.compatible = "apm,xgene-enet",},
979 {}, 1035 {},
980}; 1036};
981 1037
982MODULE_DEVICE_TABLE(of, xgene_enet_match); 1038MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
983 1039
984static struct platform_driver xgene_enet_driver = { 1040static struct platform_driver xgene_enet_driver = {
985 .driver = { 1041 .driver = {
986 .name = "xgene-enet", 1042 .name = "xgene-enet",
987 .of_match_table = xgene_enet_match, 1043 .of_match_table = of_match_ptr(xgene_enet_of_match),
1044 .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
988 }, 1045 },
989 .probe = xgene_enet_probe, 1046 .probe = xgene_enet_probe,
990 .remove = xgene_enet_remove, 1047 .remove = xgene_enet_remove,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index f9958fae6ffd..c2d465c3db66 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -22,7 +22,10 @@
22#ifndef __XGENE_ENET_MAIN_H__ 22#ifndef __XGENE_ENET_MAIN_H__
23#define __XGENE_ENET_MAIN_H__ 23#define __XGENE_ENET_MAIN_H__
24 24
25#include <linux/acpi.h>
25#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/efi.h>
28#include <linux/io.h>
26#include <linux/of_platform.h> 29#include <linux/of_platform.h>
27#include <linux/of_net.h> 30#include <linux/of_net.h>
28#include <linux/of_mdio.h> 31#include <linux/of_mdio.h>
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index c9946c6c119e..587f63e87588 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2235,8 +2235,8 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
2235 return NETDEV_TX_OK; 2235 return NETDEV_TX_OK;
2236 } 2236 }
2237 2237
2238 if (unlikely(vlan_tx_tag_present(skb))) { 2238 if (unlikely(skb_vlan_tag_present(skb))) {
2239 u16 vlan = vlan_tx_tag_get(skb); 2239 u16 vlan = skb_vlan_tag_get(skb);
2240 __le16 tag; 2240 __le16 tag;
2241 2241
2242 vlan = cpu_to_le16(vlan); 2242 vlan = cpu_to_le16(vlan);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 2326579f9454..59a03a193e83 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1892,8 +1892,8 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
1892 1892
1893 tpd = atl1e_get_tpd(adapter); 1893 tpd = atl1e_get_tpd(adapter);
1894 1894
1895 if (vlan_tx_tag_present(skb)) { 1895 if (skb_vlan_tag_present(skb)) {
1896 u16 vlan_tag = vlan_tx_tag_get(skb); 1896 u16 vlan_tag = skb_vlan_tag_get(skb);
1897 u16 atl1e_vlan_tag; 1897 u16 atl1e_vlan_tag;
1898 1898
1899 tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; 1899 tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
@@ -2373,9 +2373,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2373 2373
2374 netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64); 2374 netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
2375 2375
2376 init_timer(&adapter->phy_config_timer); 2376 setup_timer(&adapter->phy_config_timer, atl1e_phy_config,
2377 adapter->phy_config_timer.function = atl1e_phy_config; 2377 (unsigned long)adapter);
2378 adapter->phy_config_timer.data = (unsigned long) adapter;
2379 2378
2380 /* get user settings */ 2379 /* get user settings */
2381 atl1e_check_options(adapter); 2380 atl1e_check_options(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 2c8f398aeda9..eca1d113fee1 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2415,8 +2415,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
2415 (u16) atomic_read(&tpd_ring->next_to_use)); 2415 (u16) atomic_read(&tpd_ring->next_to_use));
2416 memset(ptpd, 0, sizeof(struct tx_packet_desc)); 2416 memset(ptpd, 0, sizeof(struct tx_packet_desc));
2417 2417
2418 if (vlan_tx_tag_present(skb)) { 2418 if (skb_vlan_tag_present(skb)) {
2419 vlan_tag = vlan_tx_tag_get(skb); 2419 vlan_tag = skb_vlan_tag_get(skb);
2420 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | 2420 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
2421 ((vlan_tag >> 9) & 0x8); 2421 ((vlan_tag >> 9) & 0x8);
2422 ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; 2422 ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 84a09e8ddd9c..46a535318c7a 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -887,8 +887,8 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
887 offset = ((u32)(skb->len-copy_len + 3) & ~3); 887 offset = ((u32)(skb->len-copy_len + 3) & ~3);
888 } 888 }
889#ifdef NETIF_F_HW_VLAN_CTAG_TX 889#ifdef NETIF_F_HW_VLAN_CTAG_TX
890 if (vlan_tx_tag_present(skb)) { 890 if (skb_vlan_tag_present(skb)) {
891 u16 vlan_tag = vlan_tx_tag_get(skb); 891 u16 vlan_tag = skb_vlan_tag_get(skb);
892 vlan_tag = (vlan_tag << 4) | 892 vlan_tag = (vlan_tag << 4) |
893 (vlan_tag >> 13) | 893 (vlan_tag >> 13) |
894 ((vlan_tag >> 9) & 0x8); 894 ((vlan_tag >> 9) & 0x8);
@@ -1436,13 +1436,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1436 1436
1437 atl2_check_options(adapter); 1437 atl2_check_options(adapter);
1438 1438
1439 init_timer(&adapter->watchdog_timer); 1439 setup_timer(&adapter->watchdog_timer, atl2_watchdog,
1440 adapter->watchdog_timer.function = atl2_watchdog; 1440 (unsigned long)adapter);
1441 adapter->watchdog_timer.data = (unsigned long) adapter;
1442 1441
1443 init_timer(&adapter->phy_config_timer); 1442 setup_timer(&adapter->phy_config_timer, atl2_phy_config,
1444 adapter->phy_config_timer.function = atl2_phy_config; 1443 (unsigned long)adapter);
1445 adapter->phy_config_timer.data = (unsigned long) adapter;
1446 1444
1447 INIT_WORK(&adapter->reset_task, atl2_reset_task); 1445 INIT_WORK(&adapter->reset_task, atl2_reset_task);
1448 INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task); 1446 INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 823d01c5684c..02bf0b86995b 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6597,9 +6597,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6597 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 6597 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6598 } 6598 }
6599 6599
6600 if (vlan_tx_tag_present(skb)) { 6600 if (skb_vlan_tag_present(skb)) {
6601 vlan_tag_flags |= 6601 vlan_tag_flags |=
6602 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); 6602 (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6603 } 6603 }
6604 6604
6605 if ((mss = skb_shinfo(skb)->gso_size)) { 6605 if ((mss = skb_shinfo(skb)->gso_size)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index c3a6072134f5..756053c028be 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -22,7 +22,7 @@
22 22
23#include <linux/ptp_clock_kernel.h> 23#include <linux/ptp_clock_kernel.h>
24#include <linux/net_tstamp.h> 24#include <linux/net_tstamp.h>
25#include <linux/clocksource.h> 25#include <linux/timecounter.h>
26 26
27/* compilation time flags */ 27/* compilation time flags */
28 28
@@ -1138,12 +1138,8 @@ struct bnx2x_port {
1138 u32 link_config[LINK_CONFIG_SIZE]; 1138 u32 link_config[LINK_CONFIG_SIZE];
1139 1139
1140 u32 supported[LINK_CONFIG_SIZE]; 1140 u32 supported[LINK_CONFIG_SIZE];
1141/* link settings - missing defines */
1142#define SUPPORTED_2500baseX_Full (1 << 15)
1143 1141
1144 u32 advertising[LINK_CONFIG_SIZE]; 1142 u32 advertising[LINK_CONFIG_SIZE];
1145/* link settings - missing defines */
1146#define ADVERTISED_2500baseX_Full (1 << 15)
1147 1143
1148 u32 phy_addr; 1144 u32 phy_addr;
1149 1145
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e468ed3f210f..0a9faa134a9a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3865,9 +3865,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3865 "sending pkt %u @%p next_idx %u bd %u @%p\n", 3865 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3866 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); 3866 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3867 3867
3868 if (vlan_tx_tag_present(skb)) { 3868 if (skb_vlan_tag_present(skb)) {
3869 tx_start_bd->vlan_or_ethertype = 3869 tx_start_bd->vlan_or_ethertype =
3870 cpu_to_le16(vlan_tx_tag_get(skb)); 3870 cpu_to_le16(skb_vlan_tag_get(skb));
3871 tx_start_bd->bd_flags.as_bitfield |= 3871 tx_start_bd->bd_flags.as_bitfield |=
3872 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 3872 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3873 } else { 3873 } else {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 72eef9fc883e..7155e1d2c208 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9169,7 +9169,7 @@ static void bnx2x_disable_ptp(struct bnx2x *bp)
9169} 9169}
9170 9170
9171/* Called during unload, to stop PTP-related stuff */ 9171/* Called during unload, to stop PTP-related stuff */
9172void bnx2x_stop_ptp(struct bnx2x *bp) 9172static void bnx2x_stop_ptp(struct bnx2x *bp)
9173{ 9173{
9174 /* Cancel PTP work queue. Should be done after the Tx queues are 9174 /* Cancel PTP work queue. Should be done after the Tx queues are
9175 * drained to prevent additional scheduling. 9175 * drained to prevent additional scheduling.
@@ -13267,14 +13267,10 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13267static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 13267static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13268{ 13268{
13269 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); 13269 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13270 u64 now;
13271 13270
13272 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta); 13271 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13273 13272
13274 now = timecounter_read(&bp->timecounter); 13273 timecounter_adjtime(&bp->timecounter, delta);
13275 now += delta;
13276 /* Re-init the timecounter */
13277 timecounter_init(&bp->timecounter, &bp->cyclecounter, now);
13278 13274
13279 return 0; 13275 return 0;
13280} 13276}
@@ -13322,7 +13318,7 @@ static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13322 return -ENOTSUPP; 13318 return -ENOTSUPP;
13323} 13319}
13324 13320
13325void bnx2x_register_phc(struct bnx2x *bp) 13321static void bnx2x_register_phc(struct bnx2x *bp)
13326{ 13322{
13327 /* Fill the ptp_clock_info struct and register PTP clock*/ 13323 /* Fill the ptp_clock_info struct and register PTP clock*/
13328 bp->ptp_clock_info.owner = THIS_MODULE; 13324 bp->ptp_clock_info.owner = THIS_MODULE;
@@ -14614,7 +14610,7 @@ static void bnx2x_init_cyclecounter(struct bnx2x *bp)
14614{ 14610{
14615 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); 14611 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
14616 bp->cyclecounter.read = bnx2x_cyclecounter_read; 14612 bp->cyclecounter.read = bnx2x_cyclecounter_read;
14617 bp->cyclecounter.mask = CLOCKSOURCE_MASK(64); 14613 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
14618 bp->cyclecounter.shift = 1; 14614 bp->cyclecounter.shift = 1;
14619 bp->cyclecounter.mult = 1; 14615 bp->cyclecounter.mult = 1;
14620} 14616}
@@ -14639,7 +14635,7 @@ static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
14639 return bnx2x_func_state_change(bp, &func_params); 14635 return bnx2x_func_state_change(bp, &func_params);
14640} 14636}
14641 14637
14642int bnx2x_enable_ptp_packets(struct bnx2x *bp) 14638static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
14643{ 14639{
14644 struct bnx2x_queue_state_params q_params; 14640 struct bnx2x_queue_state_params q_params;
14645 int rc, i; 14641 int rc, i;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 96bf01ba32dd..615a6dbde047 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8008,9 +8008,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8008 !mss && skb->len > VLAN_ETH_FRAME_LEN) 8008 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8009 base_flags |= TXD_FLAG_JMB_PKT; 8009 base_flags |= TXD_FLAG_JMB_PKT;
8010 8010
8011 if (vlan_tx_tag_present(skb)) { 8011 if (skb_vlan_tag_present(skb)) {
8012 base_flags |= TXD_FLAG_VLAN; 8012 base_flags |= TXD_FLAG_VLAN;
8013 vlan = vlan_tx_tag_get(skb); 8013 vlan = skb_vlan_tag_get(skb);
8014 } 8014 }
8015 8015
8016 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && 8016 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
@@ -11573,11 +11573,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11573 tg3_flag_set(tp, INIT_COMPLETE); 11573 tg3_flag_set(tp, INIT_COMPLETE);
11574 tg3_enable_ints(tp); 11574 tg3_enable_ints(tp);
11575 11575
11576 if (init) 11576 tg3_ptp_resume(tp);
11577 tg3_ptp_init(tp);
11578 else
11579 tg3_ptp_resume(tp);
11580
11581 11577
11582 tg3_full_unlock(tp); 11578 tg3_full_unlock(tp);
11583 11579
@@ -11698,13 +11694,6 @@ static int tg3_open(struct net_device *dev)
11698 pci_set_power_state(tp->pdev, PCI_D3hot); 11694 pci_set_power_state(tp->pdev, PCI_D3hot);
11699 } 11695 }
11700 11696
11701 if (tg3_flag(tp, PTP_CAPABLE)) {
11702 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11703 &tp->pdev->dev);
11704 if (IS_ERR(tp->ptp_clock))
11705 tp->ptp_clock = NULL;
11706 }
11707
11708 return err; 11697 return err;
11709} 11698}
11710 11699
@@ -11718,8 +11707,6 @@ static int tg3_close(struct net_device *dev)
11718 return -EAGAIN; 11707 return -EAGAIN;
11719 } 11708 }
11720 11709
11721 tg3_ptp_fini(tp);
11722
11723 tg3_stop(tp); 11710 tg3_stop(tp);
11724 11711
11725 /* Clear stats across close / open calls */ 11712 /* Clear stats across close / open calls */
@@ -17897,6 +17884,14 @@ static int tg3_init_one(struct pci_dev *pdev,
17897 goto err_out_apeunmap; 17884 goto err_out_apeunmap;
17898 } 17885 }
17899 17886
17887 if (tg3_flag(tp, PTP_CAPABLE)) {
17888 tg3_ptp_init(tp);
17889 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17890 &tp->pdev->dev);
17891 if (IS_ERR(tp->ptp_clock))
17892 tp->ptp_clock = NULL;
17893 }
17894
17900 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", 17895 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17901 tp->board_part_number, 17896 tp->board_part_number,
17902 tg3_chip_rev_id(tp), 17897 tg3_chip_rev_id(tp),
@@ -17972,6 +17967,8 @@ static void tg3_remove_one(struct pci_dev *pdev)
17972 if (dev) { 17967 if (dev) {
17973 struct tg3 *tp = netdev_priv(dev); 17968 struct tg3 *tp = netdev_priv(dev);
17974 17969
17970 tg3_ptp_fini(tp);
17971
17975 release_firmware(tp->fw); 17972 release_firmware(tp->fw);
17976 17973
17977 tg3_reset_task_cancel(tp); 17974 tg3_reset_task_cancel(tp);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 323721838cf9..7714d7790089 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2824,8 +2824,8 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2824 u32 gso_size; 2824 u32 gso_size;
2825 u16 vlan_tag = 0; 2825 u16 vlan_tag = 0;
2826 2826
2827 if (vlan_tx_tag_present(skb)) { 2827 if (skb_vlan_tag_present(skb)) {
2828 vlan_tag = (u16)vlan_tx_tag_get(skb); 2828 vlan_tag = (u16)skb_vlan_tag_get(skb);
2829 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); 2829 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2830 } 2830 }
2831 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { 2831 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 3767271c7667..ad76b8e35a00 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1691,7 +1691,7 @@ static int hash_get_index(__u8 *addr)
1691 1691
1692 for (j = 0; j < 6; j++) { 1692 for (j = 0; j < 6; j++) {
1693 for (i = 0, bitval = 0; i < 8; i++) 1693 for (i = 0, bitval = 0; i < 8; i++)
1694 bitval ^= hash_bit_value(i*6 + j, addr); 1694 bitval ^= hash_bit_value(i * 6 + j, addr);
1695 1695
1696 hash_index |= (bitval << j); 1696 hash_index |= (bitval << j);
1697 } 1697 }
@@ -1827,12 +1827,23 @@ static int macb_close(struct net_device *dev)
1827 1827
1828static void gem_update_stats(struct macb *bp) 1828static void gem_update_stats(struct macb *bp)
1829{ 1829{
1830 u32 __iomem *reg = bp->regs + GEM_OTX; 1830 int i;
1831 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 1831 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
1832 u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
1833 1832
1834 for (; p < end; p++, reg++) 1833 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
1835 *p += __raw_readl(reg); 1834 u32 offset = gem_statistics[i].offset;
1835 u64 val = __raw_readl(bp->regs + offset);
1836
1837 bp->ethtool_stats[i] += val;
1838 *p += val;
1839
1840 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
1841 /* Add GEM_OCTTXH, GEM_OCTRXH */
1842 val = __raw_readl(bp->regs + offset + 4);
1843 bp->ethtool_stats[i] += ((u64)val) << 32;
1844 *(++p) += val;
1845 }
1846 }
1836} 1847}
1837 1848
1838static struct net_device_stats *gem_get_stats(struct macb *bp) 1849static struct net_device_stats *gem_get_stats(struct macb *bp)
@@ -1873,6 +1884,39 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
1873 return nstat; 1884 return nstat;
1874} 1885}
1875 1886
1887static void gem_get_ethtool_stats(struct net_device *dev,
1888 struct ethtool_stats *stats, u64 *data)
1889{
1890 struct macb *bp;
1891
1892 bp = netdev_priv(dev);
1893 gem_update_stats(bp);
1894 memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
1895}
1896
1897static int gem_get_sset_count(struct net_device *dev, int sset)
1898{
1899 switch (sset) {
1900 case ETH_SS_STATS:
1901 return GEM_STATS_LEN;
1902 default:
1903 return -EOPNOTSUPP;
1904 }
1905}
1906
1907static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
1908{
1909 int i;
1910
1911 switch (sset) {
1912 case ETH_SS_STATS:
1913 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
1914 memcpy(p, gem_statistics[i].stat_string,
1915 ETH_GSTRING_LEN);
1916 break;
1917 }
1918}
1919
1876struct net_device_stats *macb_get_stats(struct net_device *dev) 1920struct net_device_stats *macb_get_stats(struct net_device *dev)
1877{ 1921{
1878 struct macb *bp = netdev_priv(dev); 1922 struct macb *bp = netdev_priv(dev);
@@ -1991,6 +2035,18 @@ const struct ethtool_ops macb_ethtool_ops = {
1991}; 2035};
1992EXPORT_SYMBOL_GPL(macb_ethtool_ops); 2036EXPORT_SYMBOL_GPL(macb_ethtool_ops);
1993 2037
2038static const struct ethtool_ops gem_ethtool_ops = {
2039 .get_settings = macb_get_settings,
2040 .set_settings = macb_set_settings,
2041 .get_regs_len = macb_get_regs_len,
2042 .get_regs = macb_get_regs,
2043 .get_link = ethtool_op_get_link,
2044 .get_ts_info = ethtool_op_get_ts_info,
2045 .get_ethtool_stats = gem_get_ethtool_stats,
2046 .get_strings = gem_get_ethtool_strings,
2047 .get_sset_count = gem_get_sset_count,
2048};
2049
1994int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2050int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1995{ 2051{
1996 struct macb *bp = netdev_priv(dev); 2052 struct macb *bp = netdev_priv(dev);
@@ -2148,7 +2204,7 @@ static void macb_probe_queues(void __iomem *mem,
2148 (*num_queues)++; 2204 (*num_queues)++;
2149} 2205}
2150 2206
2151static int __init macb_probe(struct platform_device *pdev) 2207static int macb_probe(struct platform_device *pdev)
2152{ 2208{
2153 struct macb_platform_data *pdata; 2209 struct macb_platform_data *pdata;
2154 struct resource *regs; 2210 struct resource *regs;
@@ -2278,7 +2334,6 @@ static int __init macb_probe(struct platform_device *pdev)
2278 2334
2279 dev->netdev_ops = &macb_netdev_ops; 2335 dev->netdev_ops = &macb_netdev_ops;
2280 netif_napi_add(dev, &bp->napi, macb_poll, 64); 2336 netif_napi_add(dev, &bp->napi, macb_poll, 64);
2281 dev->ethtool_ops = &macb_ethtool_ops;
2282 2337
2283 dev->base_addr = regs->start; 2338 dev->base_addr = regs->start;
2284 2339
@@ -2292,12 +2347,14 @@ static int __init macb_probe(struct platform_device *pdev)
2292 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; 2347 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
2293 bp->macbgem_ops.mog_init_rings = gem_init_rings; 2348 bp->macbgem_ops.mog_init_rings = gem_init_rings;
2294 bp->macbgem_ops.mog_rx = gem_rx; 2349 bp->macbgem_ops.mog_rx = gem_rx;
2350 dev->ethtool_ops = &gem_ethtool_ops;
2295 } else { 2351 } else {
2296 bp->max_tx_length = MACB_MAX_TX_LEN; 2352 bp->max_tx_length = MACB_MAX_TX_LEN;
2297 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; 2353 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
2298 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; 2354 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
2299 bp->macbgem_ops.mog_init_rings = macb_init_rings; 2355 bp->macbgem_ops.mog_init_rings = macb_init_rings;
2300 bp->macbgem_ops.mog_rx = macb_rx; 2356 bp->macbgem_ops.mog_rx = macb_rx;
2357 dev->ethtool_ops = &macb_ethtool_ops;
2301 } 2358 }
2302 2359
2303 /* Set features */ 2360 /* Set features */
@@ -2386,7 +2443,7 @@ err_out:
2386 return err; 2443 return err;
2387} 2444}
2388 2445
2389static int __exit macb_remove(struct platform_device *pdev) 2446static int macb_remove(struct platform_device *pdev)
2390{ 2447{
2391 struct net_device *dev; 2448 struct net_device *dev;
2392 struct macb *bp; 2449 struct macb *bp;
@@ -2411,8 +2468,7 @@ static int __exit macb_remove(struct platform_device *pdev)
2411 return 0; 2468 return 0;
2412} 2469}
2413 2470
2414#ifdef CONFIG_PM 2471static int __maybe_unused macb_suspend(struct device *dev)
2415static int macb_suspend(struct device *dev)
2416{ 2472{
2417 struct platform_device *pdev = to_platform_device(dev); 2473 struct platform_device *pdev = to_platform_device(dev);
2418 struct net_device *netdev = platform_get_drvdata(pdev); 2474 struct net_device *netdev = platform_get_drvdata(pdev);
@@ -2429,7 +2485,7 @@ static int macb_suspend(struct device *dev)
2429 return 0; 2485 return 0;
2430} 2486}
2431 2487
2432static int macb_resume(struct device *dev) 2488static int __maybe_unused macb_resume(struct device *dev)
2433{ 2489{
2434 struct platform_device *pdev = to_platform_device(dev); 2490 struct platform_device *pdev = to_platform_device(dev);
2435 struct net_device *netdev = platform_get_drvdata(pdev); 2491 struct net_device *netdev = platform_get_drvdata(pdev);
@@ -2444,12 +2500,12 @@ static int macb_resume(struct device *dev)
2444 2500
2445 return 0; 2501 return 0;
2446} 2502}
2447#endif
2448 2503
2449static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume); 2504static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
2450 2505
2451static struct platform_driver macb_driver = { 2506static struct platform_driver macb_driver = {
2452 .remove = __exit_p(macb_remove), 2507 .probe = macb_probe,
2508 .remove = macb_remove,
2453 .driver = { 2509 .driver = {
2454 .name = "macb", 2510 .name = "macb",
2455 .of_match_table = of_match_ptr(macb_dt_ids), 2511 .of_match_table = of_match_ptr(macb_dt_ids),
@@ -2457,7 +2513,7 @@ static struct platform_driver macb_driver = {
2457 }, 2513 },
2458}; 2514};
2459 2515
2460module_platform_driver_probe(macb_driver, macb_probe); 2516module_platform_driver(macb_driver);
2461 2517
2462MODULE_LICENSE("GPL"); 2518MODULE_LICENSE("GPL");
2463MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); 2519MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 084191b6fad2..31dc080f2437 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -15,263 +15,309 @@
15#define MACB_MAX_QUEUES 8 15#define MACB_MAX_QUEUES 8
16 16
17/* MACB register offsets */ 17/* MACB register offsets */
18#define MACB_NCR 0x0000 18#define MACB_NCR 0x0000 /* Network Control */
19#define MACB_NCFGR 0x0004 19#define MACB_NCFGR 0x0004 /* Network Config */
20#define MACB_NSR 0x0008 20#define MACB_NSR 0x0008 /* Network Status */
21#define MACB_TAR 0x000c /* AT91RM9200 only */ 21#define MACB_TAR 0x000c /* AT91RM9200 only */
22#define MACB_TCR 0x0010 /* AT91RM9200 only */ 22#define MACB_TCR 0x0010 /* AT91RM9200 only */
23#define MACB_TSR 0x0014 23#define MACB_TSR 0x0014 /* Transmit Status */
24#define MACB_RBQP 0x0018 24#define MACB_RBQP 0x0018 /* RX Q Base Address */
25#define MACB_TBQP 0x001c 25#define MACB_TBQP 0x001c /* TX Q Base Address */
26#define MACB_RSR 0x0020 26#define MACB_RSR 0x0020 /* Receive Status */
27#define MACB_ISR 0x0024 27#define MACB_ISR 0x0024 /* Interrupt Status */
28#define MACB_IER 0x0028 28#define MACB_IER 0x0028 /* Interrupt Enable */
29#define MACB_IDR 0x002c 29#define MACB_IDR 0x002c /* Interrupt Disable */
30#define MACB_IMR 0x0030 30#define MACB_IMR 0x0030 /* Interrupt Mask */
31#define MACB_MAN 0x0034 31#define MACB_MAN 0x0034 /* PHY Maintenance */
32#define MACB_PTR 0x0038 32#define MACB_PTR 0x0038
33#define MACB_PFR 0x003c 33#define MACB_PFR 0x003c
34#define MACB_FTO 0x0040 34#define MACB_FTO 0x0040
35#define MACB_SCF 0x0044 35#define MACB_SCF 0x0044
36#define MACB_MCF 0x0048 36#define MACB_MCF 0x0048
37#define MACB_FRO 0x004c 37#define MACB_FRO 0x004c
38#define MACB_FCSE 0x0050 38#define MACB_FCSE 0x0050
39#define MACB_ALE 0x0054 39#define MACB_ALE 0x0054
40#define MACB_DTF 0x0058 40#define MACB_DTF 0x0058
41#define MACB_LCOL 0x005c 41#define MACB_LCOL 0x005c
42#define MACB_EXCOL 0x0060 42#define MACB_EXCOL 0x0060
43#define MACB_TUND 0x0064 43#define MACB_TUND 0x0064
44#define MACB_CSE 0x0068 44#define MACB_CSE 0x0068
45#define MACB_RRE 0x006c 45#define MACB_RRE 0x006c
46#define MACB_ROVR 0x0070 46#define MACB_ROVR 0x0070
47#define MACB_RSE 0x0074 47#define MACB_RSE 0x0074
48#define MACB_ELE 0x0078 48#define MACB_ELE 0x0078
49#define MACB_RJA 0x007c 49#define MACB_RJA 0x007c
50#define MACB_USF 0x0080 50#define MACB_USF 0x0080
51#define MACB_STE 0x0084 51#define MACB_STE 0x0084
52#define MACB_RLE 0x0088 52#define MACB_RLE 0x0088
53#define MACB_TPF 0x008c 53#define MACB_TPF 0x008c
54#define MACB_HRB 0x0090 54#define MACB_HRB 0x0090
55#define MACB_HRT 0x0094 55#define MACB_HRT 0x0094
56#define MACB_SA1B 0x0098 56#define MACB_SA1B 0x0098
57#define MACB_SA1T 0x009c 57#define MACB_SA1T 0x009c
58#define MACB_SA2B 0x00a0 58#define MACB_SA2B 0x00a0
59#define MACB_SA2T 0x00a4 59#define MACB_SA2T 0x00a4
60#define MACB_SA3B 0x00a8 60#define MACB_SA3B 0x00a8
61#define MACB_SA3T 0x00ac 61#define MACB_SA3T 0x00ac
62#define MACB_SA4B 0x00b0 62#define MACB_SA4B 0x00b0
63#define MACB_SA4T 0x00b4 63#define MACB_SA4T 0x00b4
64#define MACB_TID 0x00b8 64#define MACB_TID 0x00b8
65#define MACB_TPQ 0x00bc 65#define MACB_TPQ 0x00bc
66#define MACB_USRIO 0x00c0 66#define MACB_USRIO 0x00c0
67#define MACB_WOL 0x00c4 67#define MACB_WOL 0x00c4
68#define MACB_MID 0x00fc 68#define MACB_MID 0x00fc
69 69
70/* GEM register offsets. */ 70/* GEM register offsets. */
71#define GEM_NCFGR 0x0004 71#define GEM_NCFGR 0x0004 /* Network Config */
72#define GEM_USRIO 0x000c 72#define GEM_USRIO 0x000c /* User IO */
73#define GEM_DMACFG 0x0010 73#define GEM_DMACFG 0x0010 /* DMA Configuration */
74#define GEM_HRB 0x0080 74#define GEM_HRB 0x0080 /* Hash Bottom */
75#define GEM_HRT 0x0084 75#define GEM_HRT 0x0084 /* Hash Top */
76#define GEM_SA1B 0x0088 76#define GEM_SA1B 0x0088 /* Specific1 Bottom */
77#define GEM_SA1T 0x008C 77#define GEM_SA1T 0x008C /* Specific1 Top */
78#define GEM_SA2B 0x0090 78#define GEM_SA2B 0x0090 /* Specific2 Bottom */
79#define GEM_SA2T 0x0094 79#define GEM_SA2T 0x0094 /* Specific2 Top */
80#define GEM_SA3B 0x0098 80#define GEM_SA3B 0x0098 /* Specific3 Bottom */
81#define GEM_SA3T 0x009C 81#define GEM_SA3T 0x009C /* Specific3 Top */
82#define GEM_SA4B 0x00A0 82#define GEM_SA4B 0x00A0 /* Specific4 Bottom */
83#define GEM_SA4T 0x00A4 83#define GEM_SA4T 0x00A4 /* Specific4 Top */
84#define GEM_OTX 0x0100 84#define GEM_OTX 0x0100 /* Octets transmitted */
85#define GEM_DCFG1 0x0280 85#define GEM_OCTTXL 0x0100 /* Octets transmitted [31:0] */
86#define GEM_DCFG2 0x0284 86#define GEM_OCTTXH 0x0104 /* Octets transmitted [47:32] */
87#define GEM_DCFG3 0x0288 87#define GEM_TXCNT 0x0108 /* Frames Transmitted counter */
88#define GEM_DCFG4 0x028c 88#define GEM_TXBCCNT 0x010c /* Broadcast Frames counter */
89#define GEM_DCFG5 0x0290 89#define GEM_TXMCCNT 0x0110 /* Multicast Frames counter */
90#define GEM_DCFG6 0x0294 90#define GEM_TXPAUSECNT 0x0114 /* Pause Frames Transmitted Counter */
91#define GEM_DCFG7 0x0298 91#define GEM_TX64CNT 0x0118 /* 64 byte Frames TX counter */
92 92#define GEM_TX65CNT 0x011c /* 65-127 byte Frames TX counter */
93#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2)) 93#define GEM_TX128CNT 0x0120 /* 128-255 byte Frames TX counter */
94#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2)) 94#define GEM_TX256CNT 0x0124 /* 256-511 byte Frames TX counter */
95#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2)) 95#define GEM_TX512CNT 0x0128 /* 512-1023 byte Frames TX counter */
96#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2)) 96#define GEM_TX1024CNT 0x012c /* 1024-1518 byte Frames TX counter */
97#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2)) 97#define GEM_TX1519CNT 0x0130 /* 1519+ byte Frames TX counter */
98#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2)) 98#define GEM_TXURUNCNT 0x0134 /* TX under run error counter */
99#define GEM_SNGLCOLLCNT 0x0138 /* Single Collision Frame Counter */
100#define GEM_MULTICOLLCNT 0x013c /* Multiple Collision Frame Counter */
101#define GEM_EXCESSCOLLCNT 0x0140 /* Excessive Collision Frame Counter */
102#define GEM_LATECOLLCNT 0x0144 /* Late Collision Frame Counter */
103#define GEM_TXDEFERCNT 0x0148 /* Deferred Transmission Frame Counter */
104#define GEM_TXCSENSECNT 0x014c /* Carrier Sense Error Counter */
105#define GEM_ORX 0x0150 /* Octets received */
106#define GEM_OCTRXL 0x0150 /* Octets received [31:0] */
107#define GEM_OCTRXH 0x0154 /* Octets received [47:32] */
108#define GEM_RXCNT 0x0158 /* Frames Received Counter */
109#define GEM_RXBROADCNT 0x015c /* Broadcast Frames Received Counter */
110#define GEM_RXMULTICNT 0x0160 /* Multicast Frames Received Counter */
111#define GEM_RXPAUSECNT 0x0164 /* Pause Frames Received Counter */
112#define GEM_RX64CNT 0x0168 /* 64 byte Frames RX Counter */
113#define GEM_RX65CNT 0x016c /* 65-127 byte Frames RX Counter */
114#define GEM_RX128CNT 0x0170 /* 128-255 byte Frames RX Counter */
115#define GEM_RX256CNT 0x0174 /* 256-511 byte Frames RX Counter */
116#define GEM_RX512CNT 0x0178 /* 512-1023 byte Frames RX Counter */
117#define GEM_RX1024CNT 0x017c /* 1024-1518 byte Frames RX Counter */
118#define GEM_RX1519CNT 0x0180 /* 1519+ byte Frames RX Counter */
119#define GEM_RXUNDRCNT 0x0184 /* Undersize Frames Received Counter */
120#define GEM_RXOVRCNT 0x0188 /* Oversize Frames Received Counter */
121#define GEM_RXJABCNT 0x018c /* Jabbers Received Counter */
122#define GEM_RXFCSCNT 0x0190 /* Frame Check Sequence Error Counter */
123#define GEM_RXLENGTHCNT 0x0194 /* Length Field Error Counter */
124#define GEM_RXSYMBCNT 0x0198 /* Symbol Error Counter */
125#define GEM_RXALIGNCNT 0x019c /* Alignment Error Counter */
126#define GEM_RXRESERRCNT 0x01a0 /* Receive Resource Error Counter */
127#define GEM_RXORCNT 0x01a4 /* Receive Overrun Counter */
128#define GEM_RXIPCCNT 0x01a8 /* IP header Checksum Error Counter */
129#define GEM_RXTCPCCNT 0x01ac /* TCP Checksum Error Counter */
130#define GEM_RXUDPCCNT 0x01b0 /* UDP Checksum Error Counter */
131#define GEM_DCFG1 0x0280 /* Design Config 1 */
132#define GEM_DCFG2 0x0284 /* Design Config 2 */
133#define GEM_DCFG3 0x0288 /* Design Config 3 */
134#define GEM_DCFG4 0x028c /* Design Config 4 */
135#define GEM_DCFG5 0x0290 /* Design Config 5 */
136#define GEM_DCFG6 0x0294 /* Design Config 6 */
137#define GEM_DCFG7 0x0298 /* Design Config 7 */
138
139#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
140#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
141#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
142#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
143#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
144#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
99 145
100/* Bitfields in NCR */ 146/* Bitfields in NCR */
101#define MACB_LB_OFFSET 0 147#define MACB_LB_OFFSET 0 /* reserved */
102#define MACB_LB_SIZE 1 148#define MACB_LB_SIZE 1
103#define MACB_LLB_OFFSET 1 149#define MACB_LLB_OFFSET 1 /* Loop back local */
104#define MACB_LLB_SIZE 1 150#define MACB_LLB_SIZE 1
105#define MACB_RE_OFFSET 2 151#define MACB_RE_OFFSET 2 /* Receive enable */
106#define MACB_RE_SIZE 1 152#define MACB_RE_SIZE 1
107#define MACB_TE_OFFSET 3 153#define MACB_TE_OFFSET 3 /* Transmit enable */
108#define MACB_TE_SIZE 1 154#define MACB_TE_SIZE 1
109#define MACB_MPE_OFFSET 4 155#define MACB_MPE_OFFSET 4 /* Management port enable */
110#define MACB_MPE_SIZE 1 156#define MACB_MPE_SIZE 1
111#define MACB_CLRSTAT_OFFSET 5 157#define MACB_CLRSTAT_OFFSET 5 /* Clear stats regs */
112#define MACB_CLRSTAT_SIZE 1 158#define MACB_CLRSTAT_SIZE 1
113#define MACB_INCSTAT_OFFSET 6 159#define MACB_INCSTAT_OFFSET 6 /* Incremental stats regs */
114#define MACB_INCSTAT_SIZE 1 160#define MACB_INCSTAT_SIZE 1
115#define MACB_WESTAT_OFFSET 7 161#define MACB_WESTAT_OFFSET 7 /* Write enable stats regs */
116#define MACB_WESTAT_SIZE 1 162#define MACB_WESTAT_SIZE 1
117#define MACB_BP_OFFSET 8 163#define MACB_BP_OFFSET 8 /* Back pressure */
118#define MACB_BP_SIZE 1 164#define MACB_BP_SIZE 1
119#define MACB_TSTART_OFFSET 9 165#define MACB_TSTART_OFFSET 9 /* Start transmission */
120#define MACB_TSTART_SIZE 1 166#define MACB_TSTART_SIZE 1
121#define MACB_THALT_OFFSET 10 167#define MACB_THALT_OFFSET 10 /* Transmit halt */
122#define MACB_THALT_SIZE 1 168#define MACB_THALT_SIZE 1
123#define MACB_NCR_TPF_OFFSET 11 169#define MACB_NCR_TPF_OFFSET 11 /* Transmit pause frame */
124#define MACB_NCR_TPF_SIZE 1 170#define MACB_NCR_TPF_SIZE 1
125#define MACB_TZQ_OFFSET 12 171#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
126#define MACB_TZQ_SIZE 1 172#define MACB_TZQ_SIZE 1
127 173
128/* Bitfields in NCFGR */ 174/* Bitfields in NCFGR */
129#define MACB_SPD_OFFSET 0 175#define MACB_SPD_OFFSET 0 /* Speed */
130#define MACB_SPD_SIZE 1 176#define MACB_SPD_SIZE 1
131#define MACB_FD_OFFSET 1 177#define MACB_FD_OFFSET 1 /* Full duplex */
132#define MACB_FD_SIZE 1 178#define MACB_FD_SIZE 1
133#define MACB_BIT_RATE_OFFSET 2 179#define MACB_BIT_RATE_OFFSET 2 /* Discard non-VLAN frames */
134#define MACB_BIT_RATE_SIZE 1 180#define MACB_BIT_RATE_SIZE 1
135#define MACB_JFRAME_OFFSET 3 181#define MACB_JFRAME_OFFSET 3 /* reserved */
136#define MACB_JFRAME_SIZE 1 182#define MACB_JFRAME_SIZE 1
137#define MACB_CAF_OFFSET 4 183#define MACB_CAF_OFFSET 4 /* Copy all frames */
138#define MACB_CAF_SIZE 1 184#define MACB_CAF_SIZE 1
139#define MACB_NBC_OFFSET 5 185#define MACB_NBC_OFFSET 5 /* No broadcast */
140#define MACB_NBC_SIZE 1 186#define MACB_NBC_SIZE 1
141#define MACB_NCFGR_MTI_OFFSET 6 187#define MACB_NCFGR_MTI_OFFSET 6 /* Multicast hash enable */
142#define MACB_NCFGR_MTI_SIZE 1 188#define MACB_NCFGR_MTI_SIZE 1
143#define MACB_UNI_OFFSET 7 189#define MACB_UNI_OFFSET 7 /* Unicast hash enable */
144#define MACB_UNI_SIZE 1 190#define MACB_UNI_SIZE 1
145#define MACB_BIG_OFFSET 8 191#define MACB_BIG_OFFSET 8 /* Receive 1536 byte frames */
146#define MACB_BIG_SIZE 1 192#define MACB_BIG_SIZE 1
147#define MACB_EAE_OFFSET 9 193#define MACB_EAE_OFFSET 9 /* External address match enable */
148#define MACB_EAE_SIZE 1 194#define MACB_EAE_SIZE 1
149#define MACB_CLK_OFFSET 10 195#define MACB_CLK_OFFSET 10
150#define MACB_CLK_SIZE 2 196#define MACB_CLK_SIZE 2
151#define MACB_RTY_OFFSET 12 197#define MACB_RTY_OFFSET 12 /* Retry test */
152#define MACB_RTY_SIZE 1 198#define MACB_RTY_SIZE 1
153#define MACB_PAE_OFFSET 13 199#define MACB_PAE_OFFSET 13 /* Pause enable */
154#define MACB_PAE_SIZE 1 200#define MACB_PAE_SIZE 1
155#define MACB_RM9200_RMII_OFFSET 13 /* AT91RM9200 only */ 201#define MACB_RM9200_RMII_OFFSET 13 /* AT91RM9200 only */
156#define MACB_RM9200_RMII_SIZE 1 /* AT91RM9200 only */ 202#define MACB_RM9200_RMII_SIZE 1 /* AT91RM9200 only */
157#define MACB_RBOF_OFFSET 14 203#define MACB_RBOF_OFFSET 14 /* Receive buffer offset */
158#define MACB_RBOF_SIZE 2 204#define MACB_RBOF_SIZE 2
159#define MACB_RLCE_OFFSET 16 205#define MACB_RLCE_OFFSET 16 /* Length field error frame discard */
160#define MACB_RLCE_SIZE 1 206#define MACB_RLCE_SIZE 1
161#define MACB_DRFCS_OFFSET 17 207#define MACB_DRFCS_OFFSET 17 /* FCS remove */
162#define MACB_DRFCS_SIZE 1 208#define MACB_DRFCS_SIZE 1
163#define MACB_EFRHD_OFFSET 18 209#define MACB_EFRHD_OFFSET 18
164#define MACB_EFRHD_SIZE 1 210#define MACB_EFRHD_SIZE 1
165#define MACB_IRXFCS_OFFSET 19 211#define MACB_IRXFCS_OFFSET 19
166#define MACB_IRXFCS_SIZE 1 212#define MACB_IRXFCS_SIZE 1
167 213
168/* GEM specific NCFGR bitfields. */ 214/* GEM specific NCFGR bitfields. */
169#define GEM_GBE_OFFSET 10 215#define GEM_GBE_OFFSET 10 /* Gigabit mode enable */
170#define GEM_GBE_SIZE 1 216#define GEM_GBE_SIZE 1
171#define GEM_CLK_OFFSET 18 217#define GEM_CLK_OFFSET 18 /* MDC clock division */
172#define GEM_CLK_SIZE 3 218#define GEM_CLK_SIZE 3
173#define GEM_DBW_OFFSET 21 219#define GEM_DBW_OFFSET 21 /* Data bus width */
174#define GEM_DBW_SIZE 2 220#define GEM_DBW_SIZE 2
175#define GEM_RXCOEN_OFFSET 24 221#define GEM_RXCOEN_OFFSET 24
176#define GEM_RXCOEN_SIZE 1 222#define GEM_RXCOEN_SIZE 1
177 223
178/* Constants for data bus width. */ 224/* Constants for data bus width. */
179#define GEM_DBW32 0 225#define GEM_DBW32 0 /* 32 bit AMBA AHB data bus width */
180#define GEM_DBW64 1 226#define GEM_DBW64 1 /* 64 bit AMBA AHB data bus width */
181#define GEM_DBW128 2 227#define GEM_DBW128 2 /* 128 bit AMBA AHB data bus width */
182 228
183/* Bitfields in DMACFG. */ 229/* Bitfields in DMACFG. */
184#define GEM_FBLDO_OFFSET 0 230#define GEM_FBLDO_OFFSET 0 /* fixed burst length for DMA */
185#define GEM_FBLDO_SIZE 5 231#define GEM_FBLDO_SIZE 5
186#define GEM_ENDIA_OFFSET 7 232#define GEM_ENDIA_OFFSET 7 /* endian swap mode for packet data access */
187#define GEM_ENDIA_SIZE 1 233#define GEM_ENDIA_SIZE 1
188#define GEM_RXBMS_OFFSET 8 234#define GEM_RXBMS_OFFSET 8 /* RX packet buffer memory size select */
189#define GEM_RXBMS_SIZE 2 235#define GEM_RXBMS_SIZE 2
190#define GEM_TXPBMS_OFFSET 10 236#define GEM_TXPBMS_OFFSET 10 /* TX packet buffer memory size select */
191#define GEM_TXPBMS_SIZE 1 237#define GEM_TXPBMS_SIZE 1
192#define GEM_TXCOEN_OFFSET 11 238#define GEM_TXCOEN_OFFSET 11 /* TX IP/TCP/UDP checksum gen offload */
193#define GEM_TXCOEN_SIZE 1 239#define GEM_TXCOEN_SIZE 1
194#define GEM_RXBS_OFFSET 16 240#define GEM_RXBS_OFFSET 16 /* DMA receive buffer size */
195#define GEM_RXBS_SIZE 8 241#define GEM_RXBS_SIZE 8
196#define GEM_DDRP_OFFSET 24 242#define GEM_DDRP_OFFSET 24 /* disc_when_no_ahb */
197#define GEM_DDRP_SIZE 1 243#define GEM_DDRP_SIZE 1
198 244
199 245
200/* Bitfields in NSR */ 246/* Bitfields in NSR */
201#define MACB_NSR_LINK_OFFSET 0 247#define MACB_NSR_LINK_OFFSET 0 /* pcs_link_state */
202#define MACB_NSR_LINK_SIZE 1 248#define MACB_NSR_LINK_SIZE 1
203#define MACB_MDIO_OFFSET 1 249#define MACB_MDIO_OFFSET 1 /* status of the mdio_in pin */
204#define MACB_MDIO_SIZE 1 250#define MACB_MDIO_SIZE 1
205#define MACB_IDLE_OFFSET 2 251#define MACB_IDLE_OFFSET 2 /* The PHY management logic is idle */
206#define MACB_IDLE_SIZE 1 252#define MACB_IDLE_SIZE 1
207 253
208/* Bitfields in TSR */ 254/* Bitfields in TSR */
209#define MACB_UBR_OFFSET 0 255#define MACB_UBR_OFFSET 0 /* Used bit read */
210#define MACB_UBR_SIZE 1 256#define MACB_UBR_SIZE 1
211#define MACB_COL_OFFSET 1 257#define MACB_COL_OFFSET 1 /* Collision occurred */
212#define MACB_COL_SIZE 1 258#define MACB_COL_SIZE 1
213#define MACB_TSR_RLE_OFFSET 2 259#define MACB_TSR_RLE_OFFSET 2 /* Retry limit exceeded */
214#define MACB_TSR_RLE_SIZE 1 260#define MACB_TSR_RLE_SIZE 1
215#define MACB_TGO_OFFSET 3 261#define MACB_TGO_OFFSET 3 /* Transmit go */
216#define MACB_TGO_SIZE 1 262#define MACB_TGO_SIZE 1
217#define MACB_BEX_OFFSET 4 263#define MACB_BEX_OFFSET 4 /* TX frame corruption due to AHB error */
218#define MACB_BEX_SIZE 1 264#define MACB_BEX_SIZE 1
219#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */ 265#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */
220#define MACB_RM9200_BNQ_SIZE 1 /* AT91RM9200 only */ 266#define MACB_RM9200_BNQ_SIZE 1 /* AT91RM9200 only */
221#define MACB_COMP_OFFSET 5 267#define MACB_COMP_OFFSET 5 /* Trnasmit complete */
222#define MACB_COMP_SIZE 1 268#define MACB_COMP_SIZE 1
223#define MACB_UND_OFFSET 6 269#define MACB_UND_OFFSET 6 /* Trnasmit under run */
224#define MACB_UND_SIZE 1 270#define MACB_UND_SIZE 1
225 271
226/* Bitfields in RSR */ 272/* Bitfields in RSR */
227#define MACB_BNA_OFFSET 0 273#define MACB_BNA_OFFSET 0 /* Buffer not available */
228#define MACB_BNA_SIZE 1 274#define MACB_BNA_SIZE 1
229#define MACB_REC_OFFSET 1 275#define MACB_REC_OFFSET 1 /* Frame received */
230#define MACB_REC_SIZE 1 276#define MACB_REC_SIZE 1
231#define MACB_OVR_OFFSET 2 277#define MACB_OVR_OFFSET 2 /* Receive overrun */
232#define MACB_OVR_SIZE 1 278#define MACB_OVR_SIZE 1
233 279
234/* Bitfields in ISR/IER/IDR/IMR */ 280/* Bitfields in ISR/IER/IDR/IMR */
235#define MACB_MFD_OFFSET 0 281#define MACB_MFD_OFFSET 0 /* Management frame sent */
236#define MACB_MFD_SIZE 1 282#define MACB_MFD_SIZE 1
237#define MACB_RCOMP_OFFSET 1 283#define MACB_RCOMP_OFFSET 1 /* Receive complete */
238#define MACB_RCOMP_SIZE 1 284#define MACB_RCOMP_SIZE 1
239#define MACB_RXUBR_OFFSET 2 285#define MACB_RXUBR_OFFSET 2 /* RX used bit read */
240#define MACB_RXUBR_SIZE 1 286#define MACB_RXUBR_SIZE 1
241#define MACB_TXUBR_OFFSET 3 287#define MACB_TXUBR_OFFSET 3 /* TX used bit read */
242#define MACB_TXUBR_SIZE 1 288#define MACB_TXUBR_SIZE 1
243#define MACB_ISR_TUND_OFFSET 4 289#define MACB_ISR_TUND_OFFSET 4 /* Enable TX buffer under run interrupt */
244#define MACB_ISR_TUND_SIZE 1 290#define MACB_ISR_TUND_SIZE 1
245#define MACB_ISR_RLE_OFFSET 5 291#define MACB_ISR_RLE_OFFSET 5 /* EN retry exceeded/late coll interrupt */
246#define MACB_ISR_RLE_SIZE 1 292#define MACB_ISR_RLE_SIZE 1
247#define MACB_TXERR_OFFSET 6 293#define MACB_TXERR_OFFSET 6 /* EN TX frame corrupt from error interrupt */
248#define MACB_TXERR_SIZE 1 294#define MACB_TXERR_SIZE 1
249#define MACB_TCOMP_OFFSET 7 295#define MACB_TCOMP_OFFSET 7 /* Enable transmit complete interrupt */
250#define MACB_TCOMP_SIZE 1 296#define MACB_TCOMP_SIZE 1
251#define MACB_ISR_LINK_OFFSET 9 297#define MACB_ISR_LINK_OFFSET 9 /* Enable link change interrupt */
252#define MACB_ISR_LINK_SIZE 1 298#define MACB_ISR_LINK_SIZE 1
253#define MACB_ISR_ROVR_OFFSET 10 299#define MACB_ISR_ROVR_OFFSET 10 /* Enable receive overrun interrupt */
254#define MACB_ISR_ROVR_SIZE 1 300#define MACB_ISR_ROVR_SIZE 1
255#define MACB_HRESP_OFFSET 11 301#define MACB_HRESP_OFFSET 11 /* Enable hrsep not OK interrupt */
256#define MACB_HRESP_SIZE 1 302#define MACB_HRESP_SIZE 1
257#define MACB_PFR_OFFSET 12 303#define MACB_PFR_OFFSET 12 /* Enable pause frame w/ quantum interrupt */
258#define MACB_PFR_SIZE 1 304#define MACB_PFR_SIZE 1
259#define MACB_PTZ_OFFSET 13 305#define MACB_PTZ_OFFSET 13 /* Enable pause time zero interrupt */
260#define MACB_PTZ_SIZE 1 306#define MACB_PTZ_SIZE 1
261 307
262/* Bitfields in MAN */ 308/* Bitfields in MAN */
263#define MACB_DATA_OFFSET 0 309#define MACB_DATA_OFFSET 0 /* data */
264#define MACB_DATA_SIZE 16 310#define MACB_DATA_SIZE 16
265#define MACB_CODE_OFFSET 16 311#define MACB_CODE_OFFSET 16 /* Must be written to 10 */
266#define MACB_CODE_SIZE 2 312#define MACB_CODE_SIZE 2
267#define MACB_REGA_OFFSET 18 313#define MACB_REGA_OFFSET 18 /* Register address */
268#define MACB_REGA_SIZE 5 314#define MACB_REGA_SIZE 5
269#define MACB_PHYA_OFFSET 23 315#define MACB_PHYA_OFFSET 23 /* PHY address */
270#define MACB_PHYA_SIZE 5 316#define MACB_PHYA_SIZE 5
271#define MACB_RW_OFFSET 28 317#define MACB_RW_OFFSET 28 /* Operation. 10 is read. 01 is write. */
272#define MACB_RW_SIZE 2 318#define MACB_RW_SIZE 2
273#define MACB_SOF_OFFSET 30 319#define MACB_SOF_OFFSET 30 /* Must be written to 1 for Clause 22 */
274#define MACB_SOF_SIZE 2 320#define MACB_SOF_SIZE 2
275 321
276/* Bitfields in USRIO (AVR32) */ 322/* Bitfields in USRIO (AVR32) */
277#define MACB_MII_OFFSET 0 323#define MACB_MII_OFFSET 0
@@ -286,7 +332,7 @@
286/* Bitfields in USRIO (AT91) */ 332/* Bitfields in USRIO (AT91) */
287#define MACB_RMII_OFFSET 0 333#define MACB_RMII_OFFSET 0
288#define MACB_RMII_SIZE 1 334#define MACB_RMII_SIZE 1
289#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */ 335#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */
290#define GEM_RGMII_SIZE 1 336#define GEM_RGMII_SIZE 1
291#define MACB_CLKEN_OFFSET 1 337#define MACB_CLKEN_OFFSET 1
292#define MACB_CLKEN_SIZE 1 338#define MACB_CLKEN_SIZE 1
@@ -389,8 +435,7 @@
389#define queue_writel(queue, reg, value) \ 435#define queue_writel(queue, reg, value) \
390 __raw_writel((value), (queue)->bp->regs + (queue)->reg) 436 __raw_writel((value), (queue)->bp->regs + (queue)->reg)
391 437
392/* 438/* Conditional GEM/MACB macros. These perform the operation to the correct
393 * Conditional GEM/MACB macros. These perform the operation to the correct
394 * register dependent on whether the device is a GEM or a MACB. For registers 439 * register dependent on whether the device is a GEM or a MACB. For registers
395 * and bitfields that are common across both devices, use macb_{read,write}l 440 * and bitfields that are common across both devices, use macb_{read,write}l
396 * to avoid the cost of the conditional. 441 * to avoid the cost of the conditional.
@@ -413,8 +458,7 @@
413 __v; \ 458 __v; \
414 }) 459 })
415 460
416/** 461/* struct macb_dma_desc - Hardware DMA descriptor
417 * struct macb_dma_desc - Hardware DMA descriptor
418 * @addr: DMA address of data buffer 462 * @addr: DMA address of data buffer
419 * @ctrl: Control and status bits 463 * @ctrl: Control and status bits
420 */ 464 */
@@ -503,8 +547,7 @@ struct macb_dma_desc {
503/* limit RX checksum offload to TCP and UDP packets */ 547/* limit RX checksum offload to TCP and UDP packets */
504#define GEM_RX_CSUM_CHECKED_MASK 2 548#define GEM_RX_CSUM_CHECKED_MASK 2
505 549
506/** 550/* struct macb_tx_skb - data about an skb which is being transmitted
507 * struct macb_tx_skb - data about an skb which is being transmitted
508 * @skb: skb currently being transmitted, only set for the last buffer 551 * @skb: skb currently being transmitted, only set for the last buffer
509 * of the frame 552 * of the frame
510 * @mapping: DMA address of the skb's fragment buffer 553 * @mapping: DMA address of the skb's fragment buffer
@@ -519,8 +562,7 @@ struct macb_tx_skb {
519 bool mapped_as_page; 562 bool mapped_as_page;
520}; 563};
521 564
522/* 565/* Hardware-collected statistics. Used when updating the network
523 * Hardware-collected statistics. Used when updating the network
524 * device stats by a periodic timer. 566 * device stats by a periodic timer.
525 */ 567 */
526struct macb_stats { 568struct macb_stats {
@@ -595,6 +637,107 @@ struct gem_stats {
595 u32 rx_udp_checksum_errors; 637 u32 rx_udp_checksum_errors;
596}; 638};
597 639
640/* Describes the name and offset of an individual statistic register, as
641 * returned by `ethtool -S`. Also describes which net_device_stats statistics
642 * this register should contribute to.
643 */
644struct gem_statistic {
645 char stat_string[ETH_GSTRING_LEN];
646 int offset;
647 u32 stat_bits;
648};
649
650/* Bitfield defs for net_device_stat statistics */
651#define GEM_NDS_RXERR_OFFSET 0
652#define GEM_NDS_RXLENERR_OFFSET 1
653#define GEM_NDS_RXOVERERR_OFFSET 2
654#define GEM_NDS_RXCRCERR_OFFSET 3
655#define GEM_NDS_RXFRAMEERR_OFFSET 4
656#define GEM_NDS_RXFIFOERR_OFFSET 5
657#define GEM_NDS_TXERR_OFFSET 6
658#define GEM_NDS_TXABORTEDERR_OFFSET 7
659#define GEM_NDS_TXCARRIERERR_OFFSET 8
660#define GEM_NDS_TXFIFOERR_OFFSET 9
661#define GEM_NDS_COLLISIONS_OFFSET 10
662
663#define GEM_STAT_TITLE(name, title) GEM_STAT_TITLE_BITS(name, title, 0)
664#define GEM_STAT_TITLE_BITS(name, title, bits) { \
665 .stat_string = title, \
666 .offset = GEM_##name, \
667 .stat_bits = bits \
668}
669
670/* list of gem statistic registers. The names MUST match the
671 * corresponding GEM_* definitions.
672 */
673static const struct gem_statistic gem_statistics[] = {
674 GEM_STAT_TITLE(OCTTXL, "tx_octets"), /* OCTTXH combined with OCTTXL */
675 GEM_STAT_TITLE(TXCNT, "tx_frames"),
676 GEM_STAT_TITLE(TXBCCNT, "tx_broadcast_frames"),
677 GEM_STAT_TITLE(TXMCCNT, "tx_multicast_frames"),
678 GEM_STAT_TITLE(TXPAUSECNT, "tx_pause_frames"),
679 GEM_STAT_TITLE(TX64CNT, "tx_64_byte_frames"),
680 GEM_STAT_TITLE(TX65CNT, "tx_65_127_byte_frames"),
681 GEM_STAT_TITLE(TX128CNT, "tx_128_255_byte_frames"),
682 GEM_STAT_TITLE(TX256CNT, "tx_256_511_byte_frames"),
683 GEM_STAT_TITLE(TX512CNT, "tx_512_1023_byte_frames"),
684 GEM_STAT_TITLE(TX1024CNT, "tx_1024_1518_byte_frames"),
685 GEM_STAT_TITLE(TX1519CNT, "tx_greater_than_1518_byte_frames"),
686 GEM_STAT_TITLE_BITS(TXURUNCNT, "tx_underrun",
687 GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_TXFIFOERR)),
688 GEM_STAT_TITLE_BITS(SNGLCOLLCNT, "tx_single_collision_frames",
689 GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
690 GEM_STAT_TITLE_BITS(MULTICOLLCNT, "tx_multiple_collision_frames",
691 GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
692 GEM_STAT_TITLE_BITS(EXCESSCOLLCNT, "tx_excessive_collisions",
693 GEM_BIT(NDS_TXERR)|
694 GEM_BIT(NDS_TXABORTEDERR)|
695 GEM_BIT(NDS_COLLISIONS)),
696 GEM_STAT_TITLE_BITS(LATECOLLCNT, "tx_late_collisions",
697 GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
698 GEM_STAT_TITLE(TXDEFERCNT, "tx_deferred_frames"),
699 GEM_STAT_TITLE_BITS(TXCSENSECNT, "tx_carrier_sense_errors",
700 GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)),
701 GEM_STAT_TITLE(OCTRXL, "rx_octets"), /* OCTRXH combined with OCTRXL */
702 GEM_STAT_TITLE(RXCNT, "rx_frames"),
703 GEM_STAT_TITLE(RXBROADCNT, "rx_broadcast_frames"),
704 GEM_STAT_TITLE(RXMULTICNT, "rx_multicast_frames"),
705 GEM_STAT_TITLE(RXPAUSECNT, "rx_pause_frames"),
706 GEM_STAT_TITLE(RX64CNT, "rx_64_byte_frames"),
707 GEM_STAT_TITLE(RX65CNT, "rx_65_127_byte_frames"),
708 GEM_STAT_TITLE(RX128CNT, "rx_128_255_byte_frames"),
709 GEM_STAT_TITLE(RX256CNT, "rx_256_511_byte_frames"),
710 GEM_STAT_TITLE(RX512CNT, "rx_512_1023_byte_frames"),
711 GEM_STAT_TITLE(RX1024CNT, "rx_1024_1518_byte_frames"),
712 GEM_STAT_TITLE(RX1519CNT, "rx_greater_than_1518_byte_frames"),
713 GEM_STAT_TITLE_BITS(RXUNDRCNT, "rx_undersized_frames",
714 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
715 GEM_STAT_TITLE_BITS(RXOVRCNT, "rx_oversize_frames",
716 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
717 GEM_STAT_TITLE_BITS(RXJABCNT, "rx_jabbers",
718 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)),
719 GEM_STAT_TITLE_BITS(RXFCSCNT, "rx_frame_check_sequence_errors",
720 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXCRCERR)),
721 GEM_STAT_TITLE_BITS(RXLENGTHCNT, "rx_length_field_frame_errors",
722 GEM_BIT(NDS_RXERR)),
723 GEM_STAT_TITLE_BITS(RXSYMBCNT, "rx_symbol_errors",
724 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXFRAMEERR)),
725 GEM_STAT_TITLE_BITS(RXALIGNCNT, "rx_alignment_errors",
726 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXOVERERR)),
727 GEM_STAT_TITLE_BITS(RXRESERRCNT, "rx_resource_errors",
728 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXOVERERR)),
729 GEM_STAT_TITLE_BITS(RXORCNT, "rx_overruns",
730 GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXFIFOERR)),
731 GEM_STAT_TITLE_BITS(RXIPCCNT, "rx_ip_header_checksum_errors",
732 GEM_BIT(NDS_RXERR)),
733 GEM_STAT_TITLE_BITS(RXTCPCCNT, "rx_tcp_checksum_errors",
734 GEM_BIT(NDS_RXERR)),
735 GEM_STAT_TITLE_BITS(RXUDPCCNT, "rx_udp_checksum_errors",
736 GEM_BIT(NDS_RXERR)),
737};
738
739#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
740
598struct macb; 741struct macb;
599 742
600struct macb_or_gem_ops { 743struct macb_or_gem_ops {
@@ -673,6 +816,8 @@ struct macb {
673 dma_addr_t skb_physaddr; /* phys addr from pci_map_single */ 816 dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
674 int skb_length; /* saved skb length for pci_unmap_single */ 817 int skb_length; /* saved skb length for pci_unmap_single */
675 unsigned int max_tx_length; 818 unsigned int max_tx_length;
819
820 u64 ethtool_stats[GEM_STATS_LEN];
676}; 821};
677 822
678extern const struct ethtool_ops macb_ethtool_ops; 823extern const struct ethtool_ops macb_ethtool_ops;
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index babe2a915b00..526ea74e82d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1860,9 +1860,9 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1860 } 1860 }
1861 cpl->iff = dev->if_port; 1861 cpl->iff = dev->if_port;
1862 1862
1863 if (vlan_tx_tag_present(skb)) { 1863 if (skb_vlan_tag_present(skb)) {
1864 cpl->vlan_valid = 1; 1864 cpl->vlan_valid = 1;
1865 cpl->vlan = htons(vlan_tx_tag_get(skb)); 1865 cpl->vlan = htons(skb_vlan_tag_get(skb));
1866 st->vlan_insert++; 1866 st->vlan_insert++;
1867 } else 1867 } else
1868 cpl->vlan_valid = 0; 1868 cpl->vlan_valid = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/mc5.c b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
index e13b7fe9d082..338301b11518 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/mc5.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/mc5.c
@@ -97,14 +97,6 @@ static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
97 F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1); 97 F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
98} 98}
99 99
100static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
101 u32 v3)
102{
103 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
104 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
105 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
106}
107
108static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2, 100static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
109 u32 v3) 101 u32 v3)
110{ 102{
@@ -113,14 +105,6 @@ static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
113 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3); 105 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
114} 106}
115 107
116static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
117 u32 *v3)
118{
119 *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
120 *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
121 *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
122}
123
124/* 108/*
125 * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM 109 * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
126 * command cmd. The data to be written must have been set up by the caller. 110 * command cmd. The data to be written must have been set up by the caller.
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 3dfcf600fcc6..d6aa602f168d 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1148,8 +1148,8 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1148 cpl->len = htonl(skb->len); 1148 cpl->len = htonl(skb->len);
1149 cntrl = V_TXPKT_INTF(pi->port_id); 1149 cntrl = V_TXPKT_INTF(pi->port_id);
1150 1150
1151 if (vlan_tx_tag_present(skb)) 1151 if (skb_vlan_tag_present(skb))
1152 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb)); 1152 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
1153 1153
1154 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size); 1154 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1155 if (tso_info) { 1155 if (tso_info) {
@@ -1282,7 +1282,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1282 qs->port_stats[SGE_PSTAT_TX_CSUM]++; 1282 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1283 if (skb_shinfo(skb)->gso_size) 1283 if (skb_shinfo(skb)->gso_size)
1284 qs->port_stats[SGE_PSTAT_TSO]++; 1284 qs->port_stats[SGE_PSTAT_TSO]++;
1285 if (vlan_tx_tag_present(skb)) 1285 if (skb_vlan_tag_present(skb))
1286 qs->port_stats[SGE_PSTAT_VLANINS]++; 1286 qs->port_stats[SGE_PSTAT_VLANINS]++;
1287 1287
1288 /* 1288 /*
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index c74a898fcd4f..184a8d545ac4 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -727,9 +727,9 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
727 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16); 727 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
728 } 728 }
729 729
730 for (i = 0; i < 6; i++) 730 ret = hex2bin(p->eth_base, vpd.na_data, 6);
731 p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 + 731 if (ret < 0)
732 hex_to_bin(vpd.na_data[2 * i + 1]); 732 return -EINVAL;
733 return 0; 733 return 0;
734} 734}
735 735
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index b85280775997..ae50cd72358c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -4,6 +4,6 @@
4 4
5obj-$(CONFIG_CHELSIO_T4) += cxgb4.o 5obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
6 6
7cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o 7cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o
8cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o 8cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
9cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o 9cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
new file mode 100644
index 000000000000..9062a8434246
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -0,0 +1,317 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 * Copyright (C) 2003-2014 Chelsio Communications. All rights reserved.
4 *
5 * Written by Deepak (deepak.s@chelsio.com)
6 *
7 * This program is distributed in the hope that it will be useful, but WITHOUT
8 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
10 * release for licensing terms and conditions.
11 */
12
13#include <linux/module.h>
14#include <linux/netdevice.h>
15#include <linux/jhash.h>
16#include <linux/if_vlan.h>
17#include <net/addrconf.h>
18#include "cxgb4.h"
19#include "clip_tbl.h"
20
21static inline unsigned int ipv4_clip_hash(struct clip_tbl *c, const u32 *key)
22{
23 unsigned int clipt_size_half = c->clipt_size / 2;
24
25 return jhash_1word(*key, 0) % clipt_size_half;
26}
27
28static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
29{
30 unsigned int clipt_size_half = d->clipt_size / 2;
31 u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
32
33 return clipt_size_half +
34 (jhash_1word(xor, 0) % clipt_size_half);
35}
36
37static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
38 int addr_len)
39{
40 return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
41 ipv6_clip_hash(ctbl, addr);
42}
43
44static int clip6_get_mbox(const struct net_device *dev,
45 const struct in6_addr *lip)
46{
47 struct adapter *adap = netdev2adap(dev);
48 struct fw_clip_cmd c;
49
50 memset(&c, 0, sizeof(c));
51 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
52 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
53 c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
54 *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
55 *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
56 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
57}
58
59static int clip6_release_mbox(const struct net_device *dev,
60 const struct in6_addr *lip)
61{
62 struct adapter *adap = netdev2adap(dev);
63 struct fw_clip_cmd c;
64
65 memset(&c, 0, sizeof(c));
66 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
67 FW_CMD_REQUEST_F | FW_CMD_READ_F);
68 c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
69 *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
70 *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
71 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
72}
73
74int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
75{
76 struct adapter *adap = netdev2adap(dev);
77 struct clip_tbl *ctbl = adap->clipt;
78 struct clip_entry *ce, *cte;
79 u32 *addr = (u32 *)lip;
80 int hash;
81 int addr_len;
82 int ret = 0;
83
84 if (!ctbl)
85 return 0;
86
87 if (v6)
88 addr_len = 16;
89 else
90 addr_len = 4;
91
92 hash = clip_addr_hash(ctbl, addr, addr_len);
93
94 read_lock_bh(&ctbl->lock);
95 list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
96 if (addr_len == cte->addr_len &&
97 memcmp(lip, cte->addr, cte->addr_len) == 0) {
98 ce = cte;
99 read_unlock_bh(&ctbl->lock);
100 goto found;
101 }
102 }
103 read_unlock_bh(&ctbl->lock);
104
105 write_lock_bh(&ctbl->lock);
106 if (!list_empty(&ctbl->ce_free_head)) {
107 ce = list_first_entry(&ctbl->ce_free_head,
108 struct clip_entry, list);
109 list_del(&ce->list);
110 INIT_LIST_HEAD(&ce->list);
111 spin_lock_init(&ce->lock);
112 atomic_set(&ce->refcnt, 0);
113 atomic_dec(&ctbl->nfree);
114 ce->addr_len = addr_len;
115 memcpy(ce->addr, lip, addr_len);
116 list_add_tail(&ce->list, &ctbl->hash_list[hash]);
117 if (v6) {
118 ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
119 if (ret) {
120 write_unlock_bh(&ctbl->lock);
121 return ret;
122 }
123 }
124 } else {
125 write_unlock_bh(&ctbl->lock);
126 return -ENOMEM;
127 }
128 write_unlock_bh(&ctbl->lock);
129found:
130 atomic_inc(&ce->refcnt);
131
132 return 0;
133}
134EXPORT_SYMBOL(cxgb4_clip_get);
135
136void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
137{
138 struct adapter *adap = netdev2adap(dev);
139 struct clip_tbl *ctbl = adap->clipt;
140 struct clip_entry *ce, *cte;
141 u32 *addr = (u32 *)lip;
142 int hash;
143 int addr_len;
144
145 if (v6)
146 addr_len = 16;
147 else
148 addr_len = 4;
149
150 hash = clip_addr_hash(ctbl, addr, addr_len);
151
152 read_lock_bh(&ctbl->lock);
153 list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
154 if (addr_len == cte->addr_len &&
155 memcmp(lip, cte->addr, cte->addr_len) == 0) {
156 ce = cte;
157 read_unlock_bh(&ctbl->lock);
158 goto found;
159 }
160 }
161 read_unlock_bh(&ctbl->lock);
162
163 return;
164found:
165 write_lock_bh(&ctbl->lock);
166 spin_lock_bh(&ce->lock);
167 if (atomic_dec_and_test(&ce->refcnt)) {
168 list_del(&ce->list);
169 INIT_LIST_HEAD(&ce->list);
170 list_add_tail(&ce->list, &ctbl->ce_free_head);
171 atomic_inc(&ctbl->nfree);
172 if (v6)
173 clip6_release_mbox(dev, (const struct in6_addr *)lip);
174 }
175 spin_unlock_bh(&ce->lock);
176 write_unlock_bh(&ctbl->lock);
177}
178EXPORT_SYMBOL(cxgb4_clip_release);
179
180/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
181 * a physical device.
182 * The physical device reference is needed to send the actual CLIP command.
183 */
184static int cxgb4_update_dev_clip(struct net_device *root_dev,
185 struct net_device *dev)
186{
187 struct inet6_dev *idev = NULL;
188 struct inet6_ifaddr *ifa;
189 int ret = 0;
190
191 idev = __in6_dev_get(root_dev);
192 if (!idev)
193 return ret;
194
195 read_lock_bh(&idev->lock);
196 list_for_each_entry(ifa, &idev->addr_list, if_list) {
197 ret = cxgb4_clip_get(dev, (const u32 *)ifa->addr.s6_addr, 1);
198 if (ret < 0)
199 break;
200 }
201 read_unlock_bh(&idev->lock);
202
203 return ret;
204}
205
206int cxgb4_update_root_dev_clip(struct net_device *dev)
207{
208 struct net_device *root_dev = NULL;
209 int i, ret = 0;
210
211 /* First populate the real net device's IPv6 addresses */
212 ret = cxgb4_update_dev_clip(dev, dev);
213 if (ret)
214 return ret;
215
216 /* Parse all bond and vlan devices layered on top of the physical dev */
217 root_dev = netdev_master_upper_dev_get_rcu(dev);
218 if (root_dev) {
219 ret = cxgb4_update_dev_clip(root_dev, dev);
220 if (ret)
221 return ret;
222 }
223
224 for (i = 0; i < VLAN_N_VID; i++) {
225 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
226 if (!root_dev)
227 continue;
228
229 ret = cxgb4_update_dev_clip(root_dev, dev);
230 if (ret)
231 break;
232 }
233
234 return ret;
235}
236EXPORT_SYMBOL(cxgb4_update_root_dev_clip);
237
238int clip_tbl_show(struct seq_file *seq, void *v)
239{
240 struct adapter *adapter = seq->private;
241 struct clip_tbl *ctbl = adapter->clipt;
242 struct clip_entry *ce;
243 char ip[60];
244 int i;
245
246 read_lock_bh(&ctbl->lock);
247
248 seq_puts(seq, "IP Address Users\n");
249 for (i = 0 ; i < ctbl->clipt_size; ++i) {
250 list_for_each_entry(ce, &ctbl->hash_list[i], list) {
251 ip[0] = '\0';
252 if (ce->addr_len == 16)
253 sprintf(ip, "%pI6c", ce->addr);
254 else
255 sprintf(ip, "%pI4c", ce->addr);
256 seq_printf(seq, "%-25s %u\n", ip,
257 atomic_read(&ce->refcnt));
258 }
259 }
260 seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree));
261
262 read_unlock_bh(&ctbl->lock);
263
264 return 0;
265}
266
267struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
268 unsigned int clipt_end)
269{
270 struct clip_entry *cl_list;
271 struct clip_tbl *ctbl;
272 unsigned int clipt_size;
273 int i;
274
275 if (clipt_start >= clipt_end)
276 return NULL;
277 clipt_size = clipt_end - clipt_start + 1;
278 if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
279 return NULL;
280
281 ctbl = t4_alloc_mem(sizeof(*ctbl) +
282 clipt_size*sizeof(struct list_head));
283 if (!ctbl)
284 return NULL;
285
286 ctbl->clipt_start = clipt_start;
287 ctbl->clipt_size = clipt_size;
288 INIT_LIST_HEAD(&ctbl->ce_free_head);
289
290 atomic_set(&ctbl->nfree, clipt_size);
291 rwlock_init(&ctbl->lock);
292
293 for (i = 0; i < ctbl->clipt_size; ++i)
294 INIT_LIST_HEAD(&ctbl->hash_list[i]);
295
296 cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry));
297 ctbl->cl_list = (void *)cl_list;
298
299 for (i = 0; i < clipt_size; i++) {
300 INIT_LIST_HEAD(&cl_list[i].list);
301 list_add_tail(&cl_list[i].list, &ctbl->ce_free_head);
302 }
303
304 return ctbl;
305}
306
307void t4_cleanup_clip_tbl(struct adapter *adap)
308{
309 struct clip_tbl *ctbl = adap->clipt;
310
311 if (ctbl) {
312 if (ctbl->cl_list)
313 t4_free_mem(ctbl->cl_list);
314 t4_free_mem(ctbl);
315 }
316}
317EXPORT_SYMBOL(t4_cleanup_clip_tbl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
new file mode 100644
index 000000000000..2eaba0161cf8
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -0,0 +1,41 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 * Copyright (C) 2003-2014 Chelsio Communications. All rights reserved.
4 *
5 * Written by Deepak (deepak.s@chelsio.com)
6 *
7 * This program is distributed in the hope that it will be useful, but WITHOUT
8 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
10 * release for licensing terms and conditions.
11 */
12
13struct clip_entry {
14 spinlock_t lock; /* Hold while modifying clip reference */
15 atomic_t refcnt;
16 struct list_head list;
17 u32 addr[4];
18 int addr_len;
19};
20
21struct clip_tbl {
22 unsigned int clipt_start;
23 unsigned int clipt_size;
24 rwlock_t lock;
25 atomic_t nfree;
26 struct list_head ce_free_head;
27 void *cl_list;
28 struct list_head hash_list[0];
29};
30
31enum {
32 CLIPT_MIN_HASH_BUCKETS = 2,
33};
34
35struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
36 unsigned int clipt_end);
37int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6);
38void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6);
39int clip_tbl_show(struct seq_file *seq, void *v);
40int cxgb4_update_root_dev_clip(struct net_device *dev);
41void t4_cleanup_clip_tbl(struct adapter *adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 5ab5c3133acd..d6cda17efe6e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -49,16 +49,6 @@
49#include <asm/io.h> 49#include <asm/io.h>
50#include "cxgb4_uld.h" 50#include "cxgb4_uld.h"
51 51
52#define T4FW_VERSION_MAJOR 0x01
53#define T4FW_VERSION_MINOR 0x0C
54#define T4FW_VERSION_MICRO 0x19
55#define T4FW_VERSION_BUILD 0x00
56
57#define T5FW_VERSION_MAJOR 0x01
58#define T5FW_VERSION_MINOR 0x0C
59#define T5FW_VERSION_MICRO 0x19
60#define T5FW_VERSION_BUILD 0x00
61
62#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) 52#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
63 53
64enum { 54enum {
@@ -231,6 +221,7 @@ struct sge_params {
231struct tp_params { 221struct tp_params {
232 unsigned int ntxchan; /* # of Tx channels */ 222 unsigned int ntxchan; /* # of Tx channels */
233 unsigned int tre; /* log2 of core clocks per TP tick */ 223 unsigned int tre; /* log2 of core clocks per TP tick */
224 unsigned int la_mask; /* what events are recorded by TP LA */
234 unsigned short tx_modq_map; /* TX modulation scheduler queue to */ 225 unsigned short tx_modq_map; /* TX modulation scheduler queue to */
235 /* channel map */ 226 /* channel map */
236 227
@@ -290,11 +281,21 @@ enum chip_type {
290 T5_LAST_REV = T5_A1, 281 T5_LAST_REV = T5_A1,
291}; 282};
292 283
284struct devlog_params {
285 u32 memtype; /* which memory (EDC0, EDC1, MC) */
286 u32 start; /* start of log in firmware memory */
287 u32 size; /* size of log */
288};
289
293struct adapter_params { 290struct adapter_params {
294 struct sge_params sge; 291 struct sge_params sge;
295 struct tp_params tp; 292 struct tp_params tp;
296 struct vpd_params vpd; 293 struct vpd_params vpd;
297 struct pci_params pci; 294 struct pci_params pci;
295 struct devlog_params devlog;
296 enum pcie_memwin drv_memwin;
297
298 unsigned int cim_la_size;
298 299
299 unsigned int sf_size; /* serial flash size in bytes */ 300 unsigned int sf_size; /* serial flash size in bytes */
300 unsigned int sf_nsec; /* # of flash sectors */ 301 unsigned int sf_nsec; /* # of flash sectors */
@@ -476,6 +477,22 @@ struct sge_rspq { /* state for an SGE response queue */
476 struct adapter *adap; 477 struct adapter *adap;
477 struct net_device *netdev; /* associated net device */ 478 struct net_device *netdev; /* associated net device */
478 rspq_handler_t handler; 479 rspq_handler_t handler;
480#ifdef CONFIG_NET_RX_BUSY_POLL
481#define CXGB_POLL_STATE_IDLE 0
482#define CXGB_POLL_STATE_NAPI BIT(0) /* NAPI owns this poll */
483#define CXGB_POLL_STATE_POLL BIT(1) /* poll owns this poll */
484#define CXGB_POLL_STATE_NAPI_YIELD BIT(2) /* NAPI yielded this poll */
485#define CXGB_POLL_STATE_POLL_YIELD BIT(3) /* poll yielded this poll */
486#define CXGB_POLL_YIELD (CXGB_POLL_STATE_NAPI_YIELD | \
487 CXGB_POLL_STATE_POLL_YIELD)
488#define CXGB_POLL_LOCKED (CXGB_POLL_STATE_NAPI | \
489 CXGB_POLL_STATE_POLL)
490#define CXGB_POLL_USER_PEND (CXGB_POLL_STATE_POLL | \
491 CXGB_POLL_STATE_POLL_YIELD)
492 unsigned int bpoll_state;
493 spinlock_t bpoll_lock; /* lock for busy poll */
494#endif /* CONFIG_NET_RX_BUSY_POLL */
495
479}; 496};
480 497
481struct sge_eth_stats { /* Ethernet queue statistics */ 498struct sge_eth_stats { /* Ethernet queue statistics */
@@ -658,6 +675,9 @@ struct adapter {
658 unsigned int l2t_start; 675 unsigned int l2t_start;
659 unsigned int l2t_end; 676 unsigned int l2t_end;
660 struct l2t_data *l2t; 677 struct l2t_data *l2t;
678 unsigned int clipt_start;
679 unsigned int clipt_end;
680 struct clip_tbl *clipt;
661 void *uld_handle[CXGB4_ULD_MAX]; 681 void *uld_handle[CXGB4_ULD_MAX];
662 struct list_head list_node; 682 struct list_head list_node;
663 struct list_head rcu_node; 683 struct list_head rcu_node;
@@ -877,6 +897,102 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
877 return netdev2pinfo(dev)->adapter; 897 return netdev2pinfo(dev)->adapter;
878} 898}
879 899
900#ifdef CONFIG_NET_RX_BUSY_POLL
901static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
902{
903 spin_lock_init(&q->bpoll_lock);
904 q->bpoll_state = CXGB_POLL_STATE_IDLE;
905}
906
907static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
908{
909 bool rc = true;
910
911 spin_lock(&q->bpoll_lock);
912 if (q->bpoll_state & CXGB_POLL_LOCKED) {
913 q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD;
914 rc = false;
915 } else {
916 q->bpoll_state = CXGB_POLL_STATE_NAPI;
917 }
918 spin_unlock(&q->bpoll_lock);
919 return rc;
920}
921
922static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
923{
924 bool rc = false;
925
926 spin_lock(&q->bpoll_lock);
927 if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
928 rc = true;
929 q->bpoll_state = CXGB_POLL_STATE_IDLE;
930 spin_unlock(&q->bpoll_lock);
931 return rc;
932}
933
934static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
935{
936 bool rc = true;
937
938 spin_lock_bh(&q->bpoll_lock);
939 if (q->bpoll_state & CXGB_POLL_LOCKED) {
940 q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD;
941 rc = false;
942 } else {
943 q->bpoll_state |= CXGB_POLL_STATE_POLL;
944 }
945 spin_unlock_bh(&q->bpoll_lock);
946 return rc;
947}
948
949static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
950{
951 bool rc = false;
952
953 spin_lock_bh(&q->bpoll_lock);
954 if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
955 rc = true;
956 q->bpoll_state = CXGB_POLL_STATE_IDLE;
957 spin_unlock_bh(&q->bpoll_lock);
958 return rc;
959}
960
961static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
962{
963 return q->bpoll_state & CXGB_POLL_USER_PEND;
964}
965#else
966static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
967{
968}
969
970static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
971{
972 return true;
973}
974
975static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
976{
977 return false;
978}
979
980static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
981{
982 return false;
983}
984
985static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
986{
987 return false;
988}
989
990static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
991{
992 return false;
993}
994#endif /* CONFIG_NET_RX_BUSY_POLL */
995
880void t4_os_portmod_changed(const struct adapter *adap, int port_id); 996void t4_os_portmod_changed(const struct adapter *adap, int port_id);
881void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); 997void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
882 998
@@ -905,6 +1021,7 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
905int t4_sge_init(struct adapter *adap); 1021int t4_sge_init(struct adapter *adap);
906void t4_sge_start(struct adapter *adap); 1022void t4_sge_start(struct adapter *adap);
907void t4_sge_stop(struct adapter *adap); 1023void t4_sge_stop(struct adapter *adap);
1024int cxgb_busy_poll(struct napi_struct *napi);
908extern int dbfifo_int_thresh; 1025extern int dbfifo_int_thresh;
909 1026
910#define for_each_port(adapter, iter) \ 1027#define for_each_port(adapter, iter) \
@@ -995,12 +1112,16 @@ static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
995 1112
996int t4_seeprom_wp(struct adapter *adapter, bool enable); 1113int t4_seeprom_wp(struct adapter *adapter, bool enable);
997int get_vpd_params(struct adapter *adapter, struct vpd_params *p); 1114int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
1115int t4_read_flash(struct adapter *adapter, unsigned int addr,
1116 unsigned int nwords, u32 *data, int byte_oriented);
998int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); 1117int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
1118int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
999int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, 1119int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
1000 const u8 *fw_data, unsigned int size, int force); 1120 const u8 *fw_data, unsigned int size, int force);
1001unsigned int t4_flash_cfg_addr(struct adapter *adapter); 1121unsigned int t4_flash_cfg_addr(struct adapter *adapter);
1002int t4_get_fw_version(struct adapter *adapter, u32 *vers); 1122int t4_get_fw_version(struct adapter *adapter, u32 *vers);
1003int t4_get_tp_version(struct adapter *adapter, u32 *vers); 1123int t4_get_tp_version(struct adapter *adapter, u32 *vers);
1124int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
1004int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, 1125int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1005 const u8 *fw_data, unsigned int fw_size, 1126 const u8 *fw_data, unsigned int fw_size,
1006 struct fw_hdr *card_fw, enum dev_state state, int *reset); 1127 struct fw_hdr *card_fw, enum dev_state state, int *reset);
@@ -1013,6 +1134,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
1013 u64 *pbar2_qoffset, 1134 u64 *pbar2_qoffset,
1014 unsigned int *pbar2_qid); 1135 unsigned int *pbar2_qid);
1015 1136
1137unsigned int qtimer_val(const struct adapter *adap,
1138 const struct sge_rspq *q);
1016int t4_init_sge_params(struct adapter *adapter); 1139int t4_init_sge_params(struct adapter *adapter);
1017int t4_init_tp_params(struct adapter *adap); 1140int t4_init_tp_params(struct adapter *adap);
1018int t4_filter_field_shift(const struct adapter *adap, int filter_sel); 1141int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
@@ -1022,20 +1145,46 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1022 int start, int n, const u16 *rspq, unsigned int nrspq); 1145 int start, int n, const u16 *rspq, unsigned int nrspq);
1023int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, 1146int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1024 unsigned int flags); 1147 unsigned int flags);
1148int t4_read_rss(struct adapter *adapter, u16 *entries);
1149void t4_read_rss_key(struct adapter *adapter, u32 *key);
1150void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
1151void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
1152 u32 *valp);
1153void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
1154 u32 *vfl, u32 *vfh);
1155u32 t4_read_rss_pf_map(struct adapter *adapter);
1156u32 t4_read_rss_pf_mask(struct adapter *adapter);
1157
1025int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, 1158int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
1026 u64 *parity); 1159 u64 *parity);
1027int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, 1160int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
1028 u64 *parity); 1161 u64 *parity);
1162void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
1163void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
1164int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
1165 size_t n);
1166int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data,
1167 size_t n);
1168int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
1169 unsigned int *valp);
1170int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1171 const unsigned int *valp);
1172int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
1173void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
1029const char *t4_get_port_type_description(enum fw_port_type port_type); 1174const char *t4_get_port_type_description(enum fw_port_type port_type);
1030void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); 1175void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
1031void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); 1176void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
1177void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
1032void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 1178void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
1033 unsigned int mask, unsigned int val); 1179 unsigned int mask, unsigned int val);
1180void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
1034void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 1181void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1035 struct tp_tcp_stats *v6); 1182 struct tp_tcp_stats *v6);
1036void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 1183void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1037 const unsigned short *alpha, const unsigned short *beta); 1184 const unsigned short *alpha, const unsigned short *beta);
1038 1185
1186void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
1187
1039void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid); 1188void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
1040 1189
1041void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 1190void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index a35d1ec6950e..6074680bc985 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -22,7 +22,7 @@
22 22
23/* DCBx version control 23/* DCBx version control
24 */ 24 */
25char *dcb_ver_array[] = { 25static const char * const dcb_ver_array[] = {
26 "Unknown", 26 "Unknown",
27 "DCBx-CIN", 27 "DCBx-CIN",
28 "DCBx-CEE 1.01", 28 "DCBx-CEE 1.01",
@@ -428,7 +428,10 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
428 } 428 }
429 *pgid = (be32_to_cpu(pcmd.u.dcb.pgid.pgid) >> (tc * 4)) & 0xf; 429 *pgid = (be32_to_cpu(pcmd.u.dcb.pgid.pgid) >> (tc * 4)) & 0xf;
430 430
431 INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); 431 if (local)
432 INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
433 else
434 INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
432 pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE; 435 pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
433 err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); 436 err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
434 if (err != FW_PORT_DCB_CFG_SUCCESS) { 437 if (err != FW_PORT_DCB_CFG_SUCCESS) {
@@ -900,6 +903,88 @@ cxgb4_ieee_negotiation_complete(struct net_device *dev,
900 (dcb->supported & DCB_CAP_DCBX_VER_IEEE)); 903 (dcb->supported & DCB_CAP_DCBX_VER_IEEE));
901} 904}
902 905
906static int cxgb4_ieee_read_ets(struct net_device *dev, struct ieee_ets *ets,
907 int local)
908{
909 struct port_info *pi = netdev2pinfo(dev);
910 struct port_dcb_info *dcb = &pi->dcb;
911 struct adapter *adap = pi->adapter;
912 uint32_t tc_info;
913 struct fw_port_cmd pcmd;
914 int i, bwg, err;
915
916 if (!(dcb->msgs & (CXGB4_DCB_FW_PGID | CXGB4_DCB_FW_PGRATE)))
917 return 0;
918
919 ets->ets_cap = dcb->pg_num_tcs_supported;
920
921 if (local) {
922 ets->willing = 1;
923 INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
924 } else {
925 INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
926 }
927
928 pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
929 err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
930 if (err != FW_PORT_DCB_CFG_SUCCESS) {
931 dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
932 return err;
933 }
934
935 tc_info = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
936
937 if (local)
938 INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
939 else
940 INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
941
942 pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
943 err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
944 if (err != FW_PORT_DCB_CFG_SUCCESS) {
945 dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
946 -err);
947 return err;
948 }
949
950 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
951 bwg = (tc_info >> ((7 - i) * 4)) & 0xF;
952 ets->prio_tc[i] = bwg;
953 ets->tc_tx_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
954 ets->tc_rx_bw[i] = ets->tc_tx_bw[i];
955 ets->tc_tsa[i] = pcmd.u.dcb.pgrate.tsa[i];
956 }
957
958 return 0;
959}
960
961static int cxgb4_ieee_get_ets(struct net_device *dev, struct ieee_ets *ets)
962{
963 return cxgb4_ieee_read_ets(dev, ets, 1);
964}
965
966/* We reuse this for peer PFC as well, as we can't have it enabled one way */
967static int cxgb4_ieee_get_pfc(struct net_device *dev, struct ieee_pfc *pfc)
968{
969 struct port_info *pi = netdev2pinfo(dev);
970 struct port_dcb_info *dcb = &pi->dcb;
971
972 memset(pfc, 0, sizeof(struct ieee_pfc));
973
974 if (!(dcb->msgs & CXGB4_DCB_FW_PFC))
975 return 0;
976
977 pfc->pfc_cap = dcb->pfc_num_tcs_supported;
978 pfc->pfc_en = bitswap_1(dcb->pfcen);
979
980 return 0;
981}
982
983static int cxgb4_ieee_peer_ets(struct net_device *dev, struct ieee_ets *ets)
984{
985 return cxgb4_ieee_read_ets(dev, ets, 0);
986}
987
903/* Fill in the Application User Priority Map associated with the 988/* Fill in the Application User Priority Map associated with the
904 * specified Application. 989 * specified Application.
905 * Priority for IEEE dcb_app is an integer, with 0 being a valid value 990 * Priority for IEEE dcb_app is an integer, with 0 being a valid value
@@ -1106,14 +1191,23 @@ static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc)
1106 struct port_info *pi = netdev2pinfo(dev); 1191 struct port_info *pi = netdev2pinfo(dev);
1107 1192
1108 cxgb4_getnumtcs(dev, DCB_NUMTCS_ATTR_PFC, &(pfc->tcs_supported)); 1193 cxgb4_getnumtcs(dev, DCB_NUMTCS_ATTR_PFC, &(pfc->tcs_supported));
1109 pfc->pfc_en = pi->dcb.pfcen; 1194
1195 /* Firmware sends this to us in a formwat that is a bit flipped version
1196 * of spec, correct it before we send it to host. This is taken care of
1197 * by bit shifting in other uses of pfcen
1198 */
1199 pfc->pfc_en = bitswap_1(pi->dcb.pfcen);
1110 1200
1111 return 0; 1201 return 0;
1112} 1202}
1113 1203
1114const struct dcbnl_rtnl_ops cxgb4_dcb_ops = { 1204const struct dcbnl_rtnl_ops cxgb4_dcb_ops = {
1205 .ieee_getets = cxgb4_ieee_get_ets,
1206 .ieee_getpfc = cxgb4_ieee_get_pfc,
1115 .ieee_getapp = cxgb4_ieee_getapp, 1207 .ieee_getapp = cxgb4_ieee_getapp,
1116 .ieee_setapp = cxgb4_ieee_setapp, 1208 .ieee_setapp = cxgb4_ieee_setapp,
1209 .ieee_peer_getets = cxgb4_ieee_peer_ets,
1210 .ieee_peer_getpfc = cxgb4_ieee_get_pfc,
1117 1211
1118 /* CEE std */ 1212 /* CEE std */
1119 .getstate = cxgb4_getstate, 1213 .getstate = cxgb4_getstate,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
index 31ce425616c9..ccf24d3dc982 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -136,6 +136,17 @@ void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *);
136void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *); 136void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *);
137extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops; 137extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops;
138 138
139static inline __u8 bitswap_1(unsigned char val)
140{
141 return ((val & 0x80) >> 7) |
142 ((val & 0x40) >> 5) |
143 ((val & 0x20) >> 3) |
144 ((val & 0x10) >> 1) |
145 ((val & 0x08) << 1) |
146 ((val & 0x04) << 3) |
147 ((val & 0x02) << 5) |
148 ((val & 0x01) << 7);
149}
139#define CXGB4_DCB_ENABLED true 150#define CXGB4_DCB_ENABLED true
140 151
141#else /* !CONFIG_CHELSIO_T4_DCB */ 152#else /* !CONFIG_CHELSIO_T4_DCB */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index c98a350d857e..d221f6b28fcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -36,13 +36,1867 @@
36#include <linux/debugfs.h> 36#include <linux/debugfs.h>
37#include <linux/string_helpers.h> 37#include <linux/string_helpers.h>
38#include <linux/sort.h> 38#include <linux/sort.h>
39#include <linux/ctype.h>
39 40
40#include "cxgb4.h" 41#include "cxgb4.h"
41#include "t4_regs.h" 42#include "t4_regs.h"
43#include "t4_values.h"
42#include "t4fw_api.h" 44#include "t4fw_api.h"
43#include "cxgb4_debugfs.h" 45#include "cxgb4_debugfs.h"
46#include "clip_tbl.h"
44#include "l2t.h" 47#include "l2t.h"
45 48
49/* generic seq_file support for showing a table of size rows x width. */
50static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
51{
52 pos -= tb->skip_first;
53 return pos >= tb->rows ? NULL : &tb->data[pos * tb->width];
54}
55
56static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
57{
58 struct seq_tab *tb = seq->private;
59
60 if (tb->skip_first && *pos == 0)
61 return SEQ_START_TOKEN;
62
63 return seq_tab_get_idx(tb, *pos);
64}
65
66static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
67{
68 v = seq_tab_get_idx(seq->private, *pos + 1);
69 if (v)
70 ++*pos;
71 return v;
72}
73
74static void seq_tab_stop(struct seq_file *seq, void *v)
75{
76}
77
78static int seq_tab_show(struct seq_file *seq, void *v)
79{
80 const struct seq_tab *tb = seq->private;
81
82 return tb->show(seq, v, ((char *)v - tb->data) / tb->width);
83}
84
85static const struct seq_operations seq_tab_ops = {
86 .start = seq_tab_start,
87 .next = seq_tab_next,
88 .stop = seq_tab_stop,
89 .show = seq_tab_show
90};
91
92struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
93 unsigned int width, unsigned int have_header,
94 int (*show)(struct seq_file *seq, void *v, int i))
95{
96 struct seq_tab *p;
97
98 p = __seq_open_private(f, &seq_tab_ops, sizeof(*p) + rows * width);
99 if (p) {
100 p->show = show;
101 p->rows = rows;
102 p->width = width;
103 p->skip_first = have_header != 0;
104 }
105 return p;
106}
107
108/* Trim the size of a seq_tab to the supplied number of rows. The operation is
109 * irreversible.
110 */
111static int seq_tab_trim(struct seq_tab *p, unsigned int new_rows)
112{
113 if (new_rows > p->rows)
114 return -EINVAL;
115 p->rows = new_rows;
116 return 0;
117}
118
119static int cim_la_show(struct seq_file *seq, void *v, int idx)
120{
121 if (v == SEQ_START_TOKEN)
122 seq_puts(seq, "Status Data PC LS0Stat LS0Addr "
123 " LS0Data\n");
124 else {
125 const u32 *p = v;
126
127 seq_printf(seq,
128 " %02x %x%07x %x%07x %08x %08x %08x%08x%08x%08x\n",
129 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
130 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
131 p[6], p[7]);
132 }
133 return 0;
134}
135
136static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx)
137{
138 if (v == SEQ_START_TOKEN) {
139 seq_puts(seq, "Status Data PC\n");
140 } else {
141 const u32 *p = v;
142
143 seq_printf(seq, " %02x %08x %08x\n", p[5] & 0xff, p[6],
144 p[7]);
145 seq_printf(seq, " %02x %02x%06x %02x%06x\n",
146 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
147 p[4] & 0xff, p[5] >> 8);
148 seq_printf(seq, " %02x %x%07x %x%07x\n", (p[0] >> 4) & 0xff,
149 p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4);
150 }
151 return 0;
152}
153
154static int cim_la_open(struct inode *inode, struct file *file)
155{
156 int ret;
157 unsigned int cfg;
158 struct seq_tab *p;
159 struct adapter *adap = inode->i_private;
160
161 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
162 if (ret)
163 return ret;
164
165 p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1,
166 cfg & UPDBGLACAPTPCONLY_F ?
167 cim_la_show_3in1 : cim_la_show);
168 if (!p)
169 return -ENOMEM;
170
171 ret = t4_cim_read_la(adap, (u32 *)p->data, NULL);
172 if (ret)
173 seq_release_private(inode, file);
174 return ret;
175}
176
177static const struct file_operations cim_la_fops = {
178 .owner = THIS_MODULE,
179 .open = cim_la_open,
180 .read = seq_read,
181 .llseek = seq_lseek,
182 .release = seq_release_private
183};
184
185static int cim_qcfg_show(struct seq_file *seq, void *v)
186{
187 static const char * const qname[] = {
188 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",
189 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",
190 "SGE0-RX", "SGE1-RX"
191 };
192
193 int i;
194 struct adapter *adap = seq->private;
195 u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
196 u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
197 u32 stat[(4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5))];
198 u16 thres[CIM_NUM_IBQ];
199 u32 obq_wr_t4[2 * CIM_NUM_OBQ], *wr;
200 u32 obq_wr_t5[2 * CIM_NUM_OBQ_T5];
201 u32 *p = stat;
202 int cim_num_obq = is_t4(adap->params.chip) ?
203 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
204
205 i = t4_cim_read(adap, is_t4(adap->params.chip) ? UP_IBQ_0_RDADDR_A :
206 UP_IBQ_0_SHADOW_RDADDR_A,
207 ARRAY_SIZE(stat), stat);
208 if (!i) {
209 if (is_t4(adap->params.chip)) {
210 i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A,
211 ARRAY_SIZE(obq_wr_t4), obq_wr_t4);
212 wr = obq_wr_t4;
213 } else {
214 i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A,
215 ARRAY_SIZE(obq_wr_t5), obq_wr_t5);
216 wr = obq_wr_t5;
217 }
218 }
219 if (i)
220 return i;
221
222 t4_read_cimq_cfg(adap, base, size, thres);
223
224 seq_printf(seq,
225 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail\n");
226 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
227 seq_printf(seq, "%7s %5x %5u %5u %6x %4x %4u %4u %5u\n",
228 qname[i], base[i], size[i], thres[i],
229 IBQRDADDR_G(p[0]), IBQWRADDR_G(p[1]),
230 QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
231 QUEREMFLITS_G(p[2]) * 16);
232 for ( ; i < CIM_NUM_IBQ + cim_num_obq; i++, p += 4, wr += 2)
233 seq_printf(seq, "%7s %5x %5u %12x %4x %4u %4u %5u\n",
234 qname[i], base[i], size[i],
235 QUERDADDR_G(p[0]) & 0x3fff, wr[0] - base[i],
236 QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]),
237 QUEREMFLITS_G(p[2]) * 16);
238 return 0;
239}
240
241static int cim_qcfg_open(struct inode *inode, struct file *file)
242{
243 return single_open(file, cim_qcfg_show, inode->i_private);
244}
245
246static const struct file_operations cim_qcfg_fops = {
247 .owner = THIS_MODULE,
248 .open = cim_qcfg_open,
249 .read = seq_read,
250 .llseek = seq_lseek,
251 .release = single_release,
252};
253
254static int cimq_show(struct seq_file *seq, void *v, int idx)
255{
256 const u32 *p = v;
257
258 seq_printf(seq, "%#06x: %08x %08x %08x %08x\n", idx * 16, p[0], p[1],
259 p[2], p[3]);
260 return 0;
261}
262
263static int cim_ibq_open(struct inode *inode, struct file *file)
264{
265 int ret;
266 struct seq_tab *p;
267 unsigned int qid = (uintptr_t)inode->i_private & 7;
268 struct adapter *adap = inode->i_private - qid;
269
270 p = seq_open_tab(file, CIM_IBQ_SIZE, 4 * sizeof(u32), 0, cimq_show);
271 if (!p)
272 return -ENOMEM;
273
274 ret = t4_read_cim_ibq(adap, qid, (u32 *)p->data, CIM_IBQ_SIZE * 4);
275 if (ret < 0)
276 seq_release_private(inode, file);
277 else
278 ret = 0;
279 return ret;
280}
281
282static const struct file_operations cim_ibq_fops = {
283 .owner = THIS_MODULE,
284 .open = cim_ibq_open,
285 .read = seq_read,
286 .llseek = seq_lseek,
287 .release = seq_release_private
288};
289
290static int cim_obq_open(struct inode *inode, struct file *file)
291{
292 int ret;
293 struct seq_tab *p;
294 unsigned int qid = (uintptr_t)inode->i_private & 7;
295 struct adapter *adap = inode->i_private - qid;
296
297 p = seq_open_tab(file, 6 * CIM_OBQ_SIZE, 4 * sizeof(u32), 0, cimq_show);
298 if (!p)
299 return -ENOMEM;
300
301 ret = t4_read_cim_obq(adap, qid, (u32 *)p->data, 6 * CIM_OBQ_SIZE * 4);
302 if (ret < 0) {
303 seq_release_private(inode, file);
304 } else {
305 seq_tab_trim(p, ret / 4);
306 ret = 0;
307 }
308 return ret;
309}
310
311static const struct file_operations cim_obq_fops = {
312 .owner = THIS_MODULE,
313 .open = cim_obq_open,
314 .read = seq_read,
315 .llseek = seq_lseek,
316 .release = seq_release_private
317};
318
319struct field_desc {
320 const char *name;
321 unsigned int start;
322 unsigned int width;
323};
324
325static void field_desc_show(struct seq_file *seq, u64 v,
326 const struct field_desc *p)
327{
328 char buf[32];
329 int line_size = 0;
330
331 while (p->name) {
332 u64 mask = (1ULL << p->width) - 1;
333 int len = scnprintf(buf, sizeof(buf), "%s: %llu", p->name,
334 ((unsigned long long)v >> p->start) & mask);
335
336 if (line_size + len >= 79) {
337 line_size = 8;
338 seq_puts(seq, "\n ");
339 }
340 seq_printf(seq, "%s ", buf);
341 line_size += len + 1;
342 p++;
343 }
344 seq_putc(seq, '\n');
345}
346
347static struct field_desc tp_la0[] = {
348 { "RcfOpCodeOut", 60, 4 },
349 { "State", 56, 4 },
350 { "WcfState", 52, 4 },
351 { "RcfOpcSrcOut", 50, 2 },
352 { "CRxError", 49, 1 },
353 { "ERxError", 48, 1 },
354 { "SanityFailed", 47, 1 },
355 { "SpuriousMsg", 46, 1 },
356 { "FlushInputMsg", 45, 1 },
357 { "FlushInputCpl", 44, 1 },
358 { "RssUpBit", 43, 1 },
359 { "RssFilterHit", 42, 1 },
360 { "Tid", 32, 10 },
361 { "InitTcb", 31, 1 },
362 { "LineNumber", 24, 7 },
363 { "Emsg", 23, 1 },
364 { "EdataOut", 22, 1 },
365 { "Cmsg", 21, 1 },
366 { "CdataOut", 20, 1 },
367 { "EreadPdu", 19, 1 },
368 { "CreadPdu", 18, 1 },
369 { "TunnelPkt", 17, 1 },
370 { "RcfPeerFin", 16, 1 },
371 { "RcfReasonOut", 12, 4 },
372 { "TxCchannel", 10, 2 },
373 { "RcfTxChannel", 8, 2 },
374 { "RxEchannel", 6, 2 },
375 { "RcfRxChannel", 5, 1 },
376 { "RcfDataOutSrdy", 4, 1 },
377 { "RxDvld", 3, 1 },
378 { "RxOoDvld", 2, 1 },
379 { "RxCongestion", 1, 1 },
380 { "TxCongestion", 0, 1 },
381 { NULL }
382};
383
384static int tp_la_show(struct seq_file *seq, void *v, int idx)
385{
386 const u64 *p = v;
387
388 field_desc_show(seq, *p, tp_la0);
389 return 0;
390}
391
392static int tp_la_show2(struct seq_file *seq, void *v, int idx)
393{
394 const u64 *p = v;
395
396 if (idx)
397 seq_putc(seq, '\n');
398 field_desc_show(seq, p[0], tp_la0);
399 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
400 field_desc_show(seq, p[1], tp_la0);
401 return 0;
402}
403
404static int tp_la_show3(struct seq_file *seq, void *v, int idx)
405{
406 static struct field_desc tp_la1[] = {
407 { "CplCmdIn", 56, 8 },
408 { "CplCmdOut", 48, 8 },
409 { "ESynOut", 47, 1 },
410 { "EAckOut", 46, 1 },
411 { "EFinOut", 45, 1 },
412 { "ERstOut", 44, 1 },
413 { "SynIn", 43, 1 },
414 { "AckIn", 42, 1 },
415 { "FinIn", 41, 1 },
416 { "RstIn", 40, 1 },
417 { "DataIn", 39, 1 },
418 { "DataInVld", 38, 1 },
419 { "PadIn", 37, 1 },
420 { "RxBufEmpty", 36, 1 },
421 { "RxDdp", 35, 1 },
422 { "RxFbCongestion", 34, 1 },
423 { "TxFbCongestion", 33, 1 },
424 { "TxPktSumSrdy", 32, 1 },
425 { "RcfUlpType", 28, 4 },
426 { "Eread", 27, 1 },
427 { "Ebypass", 26, 1 },
428 { "Esave", 25, 1 },
429 { "Static0", 24, 1 },
430 { "Cread", 23, 1 },
431 { "Cbypass", 22, 1 },
432 { "Csave", 21, 1 },
433 { "CPktOut", 20, 1 },
434 { "RxPagePoolFull", 18, 2 },
435 { "RxLpbkPkt", 17, 1 },
436 { "TxLpbkPkt", 16, 1 },
437 { "RxVfValid", 15, 1 },
438 { "SynLearned", 14, 1 },
439 { "SetDelEntry", 13, 1 },
440 { "SetInvEntry", 12, 1 },
441 { "CpcmdDvld", 11, 1 },
442 { "CpcmdSave", 10, 1 },
443 { "RxPstructsFull", 8, 2 },
444 { "EpcmdDvld", 7, 1 },
445 { "EpcmdFlush", 6, 1 },
446 { "EpcmdTrimPrefix", 5, 1 },
447 { "EpcmdTrimPostfix", 4, 1 },
448 { "ERssIp4Pkt", 3, 1 },
449 { "ERssIp6Pkt", 2, 1 },
450 { "ERssTcpUdpPkt", 1, 1 },
451 { "ERssFceFipPkt", 0, 1 },
452 { NULL }
453 };
454 static struct field_desc tp_la2[] = {
455 { "CplCmdIn", 56, 8 },
456 { "MpsVfVld", 55, 1 },
457 { "MpsPf", 52, 3 },
458 { "MpsVf", 44, 8 },
459 { "SynIn", 43, 1 },
460 { "AckIn", 42, 1 },
461 { "FinIn", 41, 1 },
462 { "RstIn", 40, 1 },
463 { "DataIn", 39, 1 },
464 { "DataInVld", 38, 1 },
465 { "PadIn", 37, 1 },
466 { "RxBufEmpty", 36, 1 },
467 { "RxDdp", 35, 1 },
468 { "RxFbCongestion", 34, 1 },
469 { "TxFbCongestion", 33, 1 },
470 { "TxPktSumSrdy", 32, 1 },
471 { "RcfUlpType", 28, 4 },
472 { "Eread", 27, 1 },
473 { "Ebypass", 26, 1 },
474 { "Esave", 25, 1 },
475 { "Static0", 24, 1 },
476 { "Cread", 23, 1 },
477 { "Cbypass", 22, 1 },
478 { "Csave", 21, 1 },
479 { "CPktOut", 20, 1 },
480 { "RxPagePoolFull", 18, 2 },
481 { "RxLpbkPkt", 17, 1 },
482 { "TxLpbkPkt", 16, 1 },
483 { "RxVfValid", 15, 1 },
484 { "SynLearned", 14, 1 },
485 { "SetDelEntry", 13, 1 },
486 { "SetInvEntry", 12, 1 },
487 { "CpcmdDvld", 11, 1 },
488 { "CpcmdSave", 10, 1 },
489 { "RxPstructsFull", 8, 2 },
490 { "EpcmdDvld", 7, 1 },
491 { "EpcmdFlush", 6, 1 },
492 { "EpcmdTrimPrefix", 5, 1 },
493 { "EpcmdTrimPostfix", 4, 1 },
494 { "ERssIp4Pkt", 3, 1 },
495 { "ERssIp6Pkt", 2, 1 },
496 { "ERssTcpUdpPkt", 1, 1 },
497 { "ERssFceFipPkt", 0, 1 },
498 { NULL }
499 };
500 const u64 *p = v;
501
502 if (idx)
503 seq_putc(seq, '\n');
504 field_desc_show(seq, p[0], tp_la0);
505 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
506 field_desc_show(seq, p[1], (p[0] & BIT(17)) ? tp_la2 : tp_la1);
507 return 0;
508}
509
510static int tp_la_open(struct inode *inode, struct file *file)
511{
512 struct seq_tab *p;
513 struct adapter *adap = inode->i_private;
514
515 switch (DBGLAMODE_G(t4_read_reg(adap, TP_DBG_LA_CONFIG_A))) {
516 case 2:
517 p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0,
518 tp_la_show2);
519 break;
520 case 3:
521 p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0,
522 tp_la_show3);
523 break;
524 default:
525 p = seq_open_tab(file, TPLA_SIZE, sizeof(u64), 0, tp_la_show);
526 }
527 if (!p)
528 return -ENOMEM;
529
530 t4_tp_read_la(adap, (u64 *)p->data, NULL);
531 return 0;
532}
533
534static ssize_t tp_la_write(struct file *file, const char __user *buf,
535 size_t count, loff_t *pos)
536{
537 int err;
538 char s[32];
539 unsigned long val;
540 size_t size = min(sizeof(s) - 1, count);
541 struct adapter *adap = FILE_DATA(file)->i_private;
542
543 if (copy_from_user(s, buf, size))
544 return -EFAULT;
545 s[size] = '\0';
546 err = kstrtoul(s, 0, &val);
547 if (err)
548 return err;
549 if (val > 0xffff)
550 return -EINVAL;
551 adap->params.tp.la_mask = val << 16;
552 t4_set_reg_field(adap, TP_DBG_LA_CONFIG_A, 0xffff0000U,
553 adap->params.tp.la_mask);
554 return count;
555}
556
557static const struct file_operations tp_la_fops = {
558 .owner = THIS_MODULE,
559 .open = tp_la_open,
560 .read = seq_read,
561 .llseek = seq_lseek,
562 .release = seq_release_private,
563 .write = tp_la_write
564};
565
566static int ulprx_la_show(struct seq_file *seq, void *v, int idx)
567{
568 const u32 *p = v;
569
570 if (v == SEQ_START_TOKEN)
571 seq_puts(seq, " Pcmd Type Message"
572 " Data\n");
573 else
574 seq_printf(seq, "%08x%08x %4x %08x %08x%08x%08x%08x\n",
575 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
576 return 0;
577}
578
579static int ulprx_la_open(struct inode *inode, struct file *file)
580{
581 struct seq_tab *p;
582 struct adapter *adap = inode->i_private;
583
584 p = seq_open_tab(file, ULPRX_LA_SIZE, 8 * sizeof(u32), 1,
585 ulprx_la_show);
586 if (!p)
587 return -ENOMEM;
588
589 t4_ulprx_read_la(adap, (u32 *)p->data);
590 return 0;
591}
592
593static const struct file_operations ulprx_la_fops = {
594 .owner = THIS_MODULE,
595 .open = ulprx_la_open,
596 .read = seq_read,
597 .llseek = seq_lseek,
598 .release = seq_release_private
599};
600
601/* Show the PM memory stats. These stats include:
602 *
603 * TX:
604 * Read: memory read operation
605 * Write Bypass: cut-through
606 * Bypass + mem: cut-through and save copy
607 *
608 * RX:
609 * Read: memory read
610 * Write Bypass: cut-through
611 * Flush: payload trim or drop
612 */
613static int pm_stats_show(struct seq_file *seq, void *v)
614{
615 static const char * const tx_pm_stats[] = {
616 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
617 };
618 static const char * const rx_pm_stats[] = {
619 "Read:", "Write bypass:", "Write mem:", "Flush:"
620 };
621
622 int i;
623 u32 tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
624 u64 tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
625 struct adapter *adap = seq->private;
626
627 t4_pmtx_get_stats(adap, tx_cnt, tx_cyc);
628 t4_pmrx_get_stats(adap, rx_cnt, rx_cyc);
629
630 seq_printf(seq, "%13s %10s %20s\n", " ", "Tx pcmds", "Tx bytes");
631 for (i = 0; i < PM_NSTATS - 1; i++)
632 seq_printf(seq, "%-13s %10u %20llu\n",
633 tx_pm_stats[i], tx_cnt[i], tx_cyc[i]);
634
635 seq_printf(seq, "%13s %10s %20s\n", " ", "Rx pcmds", "Rx bytes");
636 for (i = 0; i < PM_NSTATS - 1; i++)
637 seq_printf(seq, "%-13s %10u %20llu\n",
638 rx_pm_stats[i], rx_cnt[i], rx_cyc[i]);
639 return 0;
640}
641
642static int pm_stats_open(struct inode *inode, struct file *file)
643{
644 return single_open(file, pm_stats_show, inode->i_private);
645}
646
647static ssize_t pm_stats_clear(struct file *file, const char __user *buf,
648 size_t count, loff_t *pos)
649{
650 struct adapter *adap = FILE_DATA(file)->i_private;
651
652 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, 0);
653 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, 0);
654 return count;
655}
656
657static const struct file_operations pm_stats_debugfs_fops = {
658 .owner = THIS_MODULE,
659 .open = pm_stats_open,
660 .read = seq_read,
661 .llseek = seq_lseek,
662 .release = single_release,
663 .write = pm_stats_clear
664};
665
666static int cctrl_tbl_show(struct seq_file *seq, void *v)
667{
668 static const char * const dec_fac[] = {
669 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
670 "0.9375" };
671
672 int i;
673 u16 incr[NMTUS][NCCTRL_WIN];
674 struct adapter *adap = seq->private;
675
676 t4_read_cong_tbl(adap, incr);
677
678 for (i = 0; i < NCCTRL_WIN; ++i) {
679 seq_printf(seq, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
680 incr[0][i], incr[1][i], incr[2][i], incr[3][i],
681 incr[4][i], incr[5][i], incr[6][i], incr[7][i]);
682 seq_printf(seq, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
683 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
684 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
685 adap->params.a_wnd[i],
686 dec_fac[adap->params.b_wnd[i]]);
687 }
688 return 0;
689}
690
691DEFINE_SIMPLE_DEBUGFS_FILE(cctrl_tbl);
692
693/* Format a value in a unit that differs from the value's native unit by the
694 * given factor.
695 */
696static char *unit_conv(char *buf, size_t len, unsigned int val,
697 unsigned int factor)
698{
699 unsigned int rem = val % factor;
700
701 if (rem == 0) {
702 snprintf(buf, len, "%u", val / factor);
703 } else {
704 while (rem % 10 == 0)
705 rem /= 10;
706 snprintf(buf, len, "%u.%u", val / factor, rem);
707 }
708 return buf;
709}
710
711static int clk_show(struct seq_file *seq, void *v)
712{
713 char buf[32];
714 struct adapter *adap = seq->private;
715 unsigned int cclk_ps = 1000000000 / adap->params.vpd.cclk; /* in ps */
716 u32 res = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
717 unsigned int tre = TIMERRESOLUTION_G(res);
718 unsigned int dack_re = DELAYEDACKRESOLUTION_G(res);
719 unsigned long long tp_tick_us = (cclk_ps << tre) / 1000000; /* in us */
720
721 seq_printf(seq, "Core clock period: %s ns\n",
722 unit_conv(buf, sizeof(buf), cclk_ps, 1000));
723 seq_printf(seq, "TP timer tick: %s us\n",
724 unit_conv(buf, sizeof(buf), (cclk_ps << tre), 1000000));
725 seq_printf(seq, "TCP timestamp tick: %s us\n",
726 unit_conv(buf, sizeof(buf),
727 (cclk_ps << TIMESTAMPRESOLUTION_G(res)), 1000000));
728 seq_printf(seq, "DACK tick: %s us\n",
729 unit_conv(buf, sizeof(buf), (cclk_ps << dack_re), 1000000));
730 seq_printf(seq, "DACK timer: %u us\n",
731 ((cclk_ps << dack_re) / 1000000) *
732 t4_read_reg(adap, TP_DACK_TIMER_A));
733 seq_printf(seq, "Retransmit min: %llu us\n",
734 tp_tick_us * t4_read_reg(adap, TP_RXT_MIN_A));
735 seq_printf(seq, "Retransmit max: %llu us\n",
736 tp_tick_us * t4_read_reg(adap, TP_RXT_MAX_A));
737 seq_printf(seq, "Persist timer min: %llu us\n",
738 tp_tick_us * t4_read_reg(adap, TP_PERS_MIN_A));
739 seq_printf(seq, "Persist timer max: %llu us\n",
740 tp_tick_us * t4_read_reg(adap, TP_PERS_MAX_A));
741 seq_printf(seq, "Keepalive idle timer: %llu us\n",
742 tp_tick_us * t4_read_reg(adap, TP_KEEP_IDLE_A));
743 seq_printf(seq, "Keepalive interval: %llu us\n",
744 tp_tick_us * t4_read_reg(adap, TP_KEEP_INTVL_A));
745 seq_printf(seq, "Initial SRTT: %llu us\n",
746 tp_tick_us * INITSRTT_G(t4_read_reg(adap, TP_INIT_SRTT_A)));
747 seq_printf(seq, "FINWAIT2 timer: %llu us\n",
748 tp_tick_us * t4_read_reg(adap, TP_FINWAIT2_TIMER_A));
749
750 return 0;
751}
752
753DEFINE_SIMPLE_DEBUGFS_FILE(clk);
754
755/* Firmware Device Log dump. */
756static const char * const devlog_level_strings[] = {
757 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
758 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
759 [FW_DEVLOG_LEVEL_ERR] = "ERR",
760 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
761 [FW_DEVLOG_LEVEL_INFO] = "INFO",
762 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
763};
764
/* Printable names for the FW_DEVLOG_FACILITY_* subsystem values; indexed by
 * the raw facility field of a log entry (see devlog_show()).
 */
static const char * const devlog_facility_strings[] = {
	[FW_DEVLOG_FACILITY_CORE] = "CORE",
	[FW_DEVLOG_FACILITY_SCHED] = "SCHED",
	[FW_DEVLOG_FACILITY_TIMER] = "TIMER",
	[FW_DEVLOG_FACILITY_RES] = "RES",
	[FW_DEVLOG_FACILITY_HW] = "HW",
	[FW_DEVLOG_FACILITY_FLR] = "FLR",
	[FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
	[FW_DEVLOG_FACILITY_PHY] = "PHY",
	[FW_DEVLOG_FACILITY_MAC] = "MAC",
	[FW_DEVLOG_FACILITY_PORT] = "PORT",
	[FW_DEVLOG_FACILITY_VI] = "VI",
	[FW_DEVLOG_FACILITY_FILTER] = "FILTER",
	[FW_DEVLOG_FACILITY_ACL] = "ACL",
	[FW_DEVLOG_FACILITY_TM] = "TM",
	[FW_DEVLOG_FACILITY_QFC] = "QFC",
	[FW_DEVLOG_FACILITY_DCB] = "DCB",
	[FW_DEVLOG_FACILITY_ETH] = "ETH",
	[FW_DEVLOG_FACILITY_OFLD] = "OFLD",
	[FW_DEVLOG_FACILITY_RI] = "RI",
	[FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
	[FW_DEVLOG_FACILITY_FCOE] = "FCOE",
	[FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
	[FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE"
};
790
791/* Information gathered by Device Log Open routine for the display routine.
792 */
793struct devlog_info {
794 unsigned int nentries; /* number of entries in log[] */
795 unsigned int first; /* first [temporal] entry in log[] */
796 struct fw_devlog_e log[0]; /* Firmware Device Log */
797};
798
/* Dump a Firmware Device Log entry.  The SEQ_START_TOKEN pass emits the
 * column header; every other iterator value encodes a log position + 2
 * (see devlog_get_idx(), which encodes position N as N + 1, with *pos == 0
 * reserved for the header).
 */
static int devlog_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%10s %15s %8s %8s %s\n",
			   "Seq#", "Tstamp", "Level", "Facility", "Message");
	else {
		struct devlog_info *dinfo = seq->private;
		int fidx = (uintptr_t)v - 2;	/* 0-based log position */
		unsigned long index;
		struct fw_devlog_e *e;

		/* Get a pointer to the log entry to display.  Skip unused log
		 * entries.  The log is circular: dinfo->first is the oldest
		 * entry, so wrap around nentries.
		 */
		index = dinfo->first + fidx;
		if (index >= dinfo->nentries)
			index -= dinfo->nentries;
		e = &dinfo->log[index];
		/* a zero timestamp marks a never-written entry */
		if (e->timestamp == 0)
			return 0;

		/* Print the message.  This depends on the firmware using
		 * exactly the same formating strings as the kernel so we may
		 * eventually have to put a format interpreter in here ...
		 * NOTE(review): e->fmt is a non-literal format string coming
		 * from firmware; it is trusted here by design.
		 */
		seq_printf(seq, "%10d %15llu %8s %8s ",
			   e->seqno, e->timestamp,
			   (e->level < ARRAY_SIZE(devlog_level_strings)
			    ? devlog_level_strings[e->level]
			    : "UNKNOWN"),
			   (e->facility < ARRAY_SIZE(devlog_facility_strings)
			    ? devlog_facility_strings[e->facility]
			    : "UNKNOWN"));
		seq_printf(seq, e->fmt, e->params[0], e->params[1],
			   e->params[2], e->params[3], e->params[4],
			   e->params[5], e->params[6], e->params[7]);
	}
	return 0;
}
840
841/* Sequential File Operations for Device Log.
842 */
843static inline void *devlog_get_idx(struct devlog_info *dinfo, loff_t pos)
844{
845 if (pos > dinfo->nentries)
846 return NULL;
847
848 return (void *)(uintptr_t)(pos + 1);
849}
850
851static void *devlog_start(struct seq_file *seq, loff_t *pos)
852{
853 struct devlog_info *dinfo = seq->private;
854
855 return (*pos
856 ? devlog_get_idx(dinfo, *pos)
857 : SEQ_START_TOKEN);
858}
859
860static void *devlog_next(struct seq_file *seq, void *v, loff_t *pos)
861{
862 struct devlog_info *dinfo = seq->private;
863
864 (*pos)++;
865 return devlog_get_idx(dinfo, *pos);
866}
867
/* seq_file stop: nothing to release per-iteration; the log buffer is freed
 * by seq_release_private() at close time.
 */
static void devlog_stop(struct seq_file *seq, void *v)
{
}
871
/* Iterator for the firmware device log seq_file */
static const struct seq_operations devlog_seq_ops = {
	.start = devlog_start,
	.next = devlog_next,
	.stop = devlog_stop,
	.show = devlog_show
};
878
879/* Set up for reading the firmware's device log. We read the entire log here
880 * and then display it incrementally in devlog_show().
881 */
882static int devlog_open(struct inode *inode, struct file *file)
883{
884 struct adapter *adap = inode->i_private;
885 struct devlog_params *dparams = &adap->params.devlog;
886 struct devlog_info *dinfo;
887 unsigned int index;
888 u32 fseqno;
889 int ret;
890
891 /* If we don't know where the log is we can't do anything.
892 */
893 if (dparams->start == 0)
894 return -ENXIO;
895
896 /* Allocate the space to read in the firmware's device log and set up
897 * for the iterated call to our display function.
898 */
899 dinfo = __seq_open_private(file, &devlog_seq_ops,
900 sizeof(*dinfo) + dparams->size);
901 if (!dinfo)
902 return -ENOMEM;
903
904 /* Record the basic log buffer information and read in the raw log.
905 */
906 dinfo->nentries = (dparams->size / sizeof(struct fw_devlog_e));
907 dinfo->first = 0;
908 spin_lock(&adap->win0_lock);
909 ret = t4_memory_rw(adap, adap->params.drv_memwin, dparams->memtype,
910 dparams->start, dparams->size, (__be32 *)dinfo->log,
911 T4_MEMORY_READ);
912 spin_unlock(&adap->win0_lock);
913 if (ret) {
914 seq_release_private(inode, file);
915 return ret;
916 }
917
918 /* Translate log multi-byte integral elements into host native format
919 * and determine where the first entry in the log is.
920 */
921 for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) {
922 struct fw_devlog_e *e = &dinfo->log[index];
923 int i;
924 __u32 seqno;
925
926 if (e->timestamp == 0)
927 continue;
928
929 e->timestamp = (__force __be64)be64_to_cpu(e->timestamp);
930 seqno = be32_to_cpu(e->seqno);
931 for (i = 0; i < 8; i++)
932 e->params[i] =
933 (__force __be32)be32_to_cpu(e->params[i]);
934
935 if (seqno < fseqno) {
936 fseqno = seqno;
937 dinfo->first = index;
938 }
939 }
940 return 0;
941}
942
/* debugfs operations for the firmware device log; the per-open log snapshot
 * allocated by devlog_open() is freed by seq_release_private().
 */
static const struct file_operations devlog_fops = {
	.owner = THIS_MODULE,
	.open = devlog_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private
};
950
/* Dump the current owner and raw contents of one CIM PF mailbox. */
static int mbox_show(struct seq_file *seq, void *v)
{
	static const char * const owner[] = { "none", "FW", "driver",
					      "unknown" };

	int i;
	/* seq->private is the adapter pointer with the mailbox number (0-7)
	 * packed into its low 3 bits (see the mbox0..mbox7 entries in the
	 * t4_debugfs_files[] table).
	 */
	unsigned int mbox = (uintptr_t)seq->private & 7;
	struct adapter *adap = seq->private - mbox;
	void __iomem *addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
	/* On non-T4 chips read the shadow copy of the control register.
	 * NOTE(review): presumably to avoid side effects of reading the live
	 * control register -- confirm against the T5 datasheet.
	 */
	unsigned int ctrl_reg = (is_t4(adap->params.chip)
				 ? CIM_PF_MAILBOX_CTRL_A
				 : CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A);
	void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg);

	i = MBOWNER_G(readl(ctrl));
	seq_printf(seq, "mailbox owned by %s\n\n", owner[i]);

	/* dump the mailbox data area, 64 bits per line */
	for (i = 0; i < MBOX_LEN; i += 8)
		seq_printf(seq, "%016llx\n",
			   (unsigned long long)readq(addr + i));
	return 0;
}
973
/* Open a mailbox debugfs file; i_private carries the adapter pointer tagged
 * with the mailbox number in its low bits (decoded in mbox_show()).
 */
static int mbox_open(struct inode *inode, struct file *file)
{
	return single_open(file, mbox_show, inode->i_private);
}
978
/* Write a raw message into a CIM PF mailbox and hand it to the firmware.
 * Input format: exactly eight whitespace-separated hex words followed by a
 * newline.  Returns the byte count consumed, -EINVAL on malformed input,
 * -EFAULT on copy failure, or -EBUSY if the driver doesn't own the mailbox.
 */
static ssize_t mbox_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *pos)
{
	int i;
	char c = '\n', s[256];
	unsigned long long data[8];
	const struct inode *ino;
	unsigned int mbox;
	struct adapter *adap;
	void __iomem *addr;
	void __iomem *ctrl;

	if (count > sizeof(s) - 1 || !count)
		return -EINVAL;
	if (copy_from_user(s, buf, count))
		return -EFAULT;
	s[count] = '\0';

	/* require all eight words plus a trailing newline */
	if (sscanf(s, "%llx %llx %llx %llx %llx %llx %llx %llx%c", &data[0],
	    &data[1], &data[2], &data[3], &data[4], &data[5], &data[6],
	    &data[7], &c) < 8 || c != '\n')
		return -EINVAL;

	/* recover the mailbox number from the low bits of the i_private
	 * cookie (same encoding as mbox_show())
	 */
	ino = FILE_DATA(file);
	mbox = (uintptr_t)ino->i_private & 7;
	adap = ino->i_private - mbox;
	addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
	/* the control register immediately follows the data area */
	ctrl = addr + MBOX_LEN;

	/* only write when the driver (PL) currently owns the mailbox */
	if (MBOWNER_G(readl(ctrl)) != X_MBOWNER_PL)
		return -EBUSY;

	for (i = 0; i < 8; i++)
		writeq(data[i], addr + 8 * i);

	/* mark the message valid and pass ownership to the firmware */
	writel(MBMSGVALID_F | MBOWNER_V(X_MBOWNER_FW), ctrl);
	return count;
}
1017
/* debugfs operations for the mbox0..mbox7 files (read dump + raw write) */
static const struct file_operations mbox_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = mbox_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = mbox_write
};
1026
/* Read an arbitrary byte range of the serial flash.  t4_read_flash() works
 * in 32-bit words, so each chunk is read into a bounce buffer starting at
 * the containing 4-byte boundary and the requested bytes copied out.
 */
static ssize_t flash_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	loff_t pos = *ppos;
	/* i_size holds the flash size (set when the debugfs file is created;
	 * see set_debugfs_file_size()) */
	loff_t avail = FILE_DATA(file)->i_size;
	struct adapter *adap = file->private_data;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;	/* EOF */
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		u8 data[256];

		/* offset of pos within its aligned 4-byte word */
		ofst = pos & 3;
		len = min(count + ofst, sizeof(data));
		ret = t4_read_flash(adap, pos - ofst, (len + 3) / 4,
				    (u32 *)data, 1);
		if (ret)
			return ret;

		/* skip the alignment padding when copying to the user */
		len -= ofst;
		if (copy_to_user(buf, data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	count = pos - *ppos;	/* bytes actually delivered */
	*ppos = pos;
	return count;
}
1065
/* debugfs operations for the flash image; mem_open() stores the adapter in
 * file->private_data and flushes the firmware cache before reading.
 */
static const struct file_operations flash_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = mem_open,
	.read = flash_read,
};
1071
1072static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
1073{
1074 *mask = x | y;
1075 y = (__force u64)cpu_to_be64(y);
1076 memcpy(addr, (char *)&y + 2, ETH_ALEN);
1077}
1078
/* Display one MPS TCAM entry: the {Y,X} CAM pair decoded into a MAC address
 * and mask, the classification SRAM fields, and -- for replicated entries --
 * the replication map fetched from the firmware.
 */
static int mps_tcam_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Idx Ethernet address Mask Vld Ports PF"
			 " VF Replication "
			 "P0 P1 P2 P3 ML\n");
	else {
		u64 mask;
		u8 addr[ETH_ALEN];
		struct adapter *adap = seq->private;
		unsigned int idx = (uintptr_t)v - 2;	/* iterator encodes idx + 2 */
		u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
		u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
		u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
		u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
		u32 rplc[4] = {0, 0, 0, 0};

		/* overlapping X and Y bits: treated as an unused entry */
		if (tcamx & tcamy) {
			seq_printf(seq, "%3u -\n", idx);
			goto out;
		}

		if (cls_lo & REPLICATE_F) {
			/* the replication map is only reachable through the
			 * firmware's LDST interface, so issue a mailbox
			 * command to read it
			 */
			struct fw_ldst_cmd ldst_cmd;
			int ret;

			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
				htonl(FW_CMD_OP_V(FW_LDST_CMD) |
				      FW_CMD_REQUEST_F |
				      FW_CMD_READ_F |
				      FW_LDST_CMD_ADDRSPACE_V(
					      FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.fid_ctl =
				htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
				      FW_LDST_CMD_CTL_V(idx));
			ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
					 sizeof(ldst_cmd), &ldst_cmd);
			if (ret)
				dev_warn(adap->pdev_dev, "Can't read MPS "
					 "replication map for idx %d: %d\n",
					 idx, -ret);
			else {
				rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0);
				rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32);
				rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64);
				rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96);
			}
		}

		tcamxy2valmask(tcamx, tcamy, addr, &mask);
		seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx"
			   "%3c %#x%4u%4d",
			   idx, addr[0], addr[1], addr[2], addr[3], addr[4],
			   addr[5], (unsigned long long)mask,
			   (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi),
			   PF_G(cls_lo),
			   (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
		if (cls_lo & REPLICATE_F)
			seq_printf(seq, " %08x %08x %08x %08x",
				   rplc[3], rplc[2], rplc[1], rplc[0]);
		else
			seq_printf(seq, "%36c", ' ');	/* blank-fill the column */
		seq_printf(seq, "%4u%3u%3u%3u %#x\n",
			   SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
			   SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
			   (cls_lo >> MULTILISTEN0_S) & 0xf);
	}
out:	return 0;
}
1150
1151static inline void *mps_tcam_get_idx(struct seq_file *seq, loff_t pos)
1152{
1153 struct adapter *adap = seq->private;
1154 int max_mac_addr = is_t4(adap->params.chip) ?
1155 NUM_MPS_CLS_SRAM_L_INSTANCES :
1156 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
1157 return ((pos <= max_mac_addr) ? (void *)(uintptr_t)(pos + 1) : NULL);
1158}
1159
1160static void *mps_tcam_start(struct seq_file *seq, loff_t *pos)
1161{
1162 return *pos ? mps_tcam_get_idx(seq, *pos) : SEQ_START_TOKEN;
1163}
1164
1165static void *mps_tcam_next(struct seq_file *seq, void *v, loff_t *pos)
1166{
1167 ++*pos;
1168 return mps_tcam_get_idx(seq, *pos);
1169}
1170
/* seq_file stop: nothing to release per-iteration. */
static void mps_tcam_stop(struct seq_file *seq, void *v)
{
}
1174
/* Iterator for the MPS TCAM seq_file */
static const struct seq_operations mps_tcam_seq_ops = {
	.start = mps_tcam_start,
	.next = mps_tcam_next,
	.stop = mps_tcam_stop,
	.show = mps_tcam_show
};
1181
1182static int mps_tcam_open(struct inode *inode, struct file *file)
1183{
1184 int res = seq_open(file, &mps_tcam_seq_ops);
1185
1186 if (!res) {
1187 struct seq_file *seq = file->private_data;
1188
1189 seq->private = inode->i_private;
1190 }
1191 return res;
1192}
1193
/* debugfs operations for the MPS TCAM dump */
static const struct file_operations mps_tcam_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = mps_tcam_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
1201
/* Display various sensor information: on-die temperature and core VDD, both
 * queried from the firmware via the DIAG device parameters.
 */
static int sensors_show(struct seq_file *seq, void *v)
{
	struct adapter *adap = seq->private;
	u32 param[7], val[7];
	int ret;

	/* Note that if the sensors haven't been initialized and turned on
	 * we'll get values of 0, so treat those as "<unknown>" ...
	 */
	param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
		    FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_TMP));
	param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
		    FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD));
	/* val[0] <- temperature, val[1] <- core VDD */
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
			      param, val);

	if (ret < 0 || val[0] == 0)
		seq_puts(seq, "Temperature: <unknown>\n");
	else
		seq_printf(seq, "Temperature: %dC\n", val[0]);

	if (ret < 0 || val[1] == 0)
		seq_puts(seq, "Core VDD: <unknown>\n");
	else
		seq_printf(seq, "Core VDD: %dmV\n", val[1]);

	return 0;
}

/* Wrap sensors_show in a single-open debugfs file */
DEFINE_SIMPLE_DEBUGFS_FILE(sensors);
1236
#if IS_ENABLED(CONFIG_IPV6)
/* CLIP table view; only built when IPv6 support is enabled since
 * clip_tbl_show() is provided by the IPv6-dependent CLIP code.
 */
static int clip_tbl_open(struct inode *inode, struct file *file)
{
	return single_open(file, clip_tbl_show, inode->i_private);
}

static const struct file_operations clip_tbl_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = clip_tbl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
#endif
1251
/* RSS Table.
 */
1254
1255static int rss_show(struct seq_file *seq, void *v, int idx)
1256{
1257 u16 *entry = v;
1258
1259 seq_printf(seq, "%4d: %4u %4u %4u %4u %4u %4u %4u %4u\n",
1260 idx * 8, entry[0], entry[1], entry[2], entry[3], entry[4],
1261 entry[5], entry[6], entry[7]);
1262 return 0;
1263}
1264
/* Read the entire RSS indirection table into a seq_tab for display by
 * rss_show(), eight u16 entries per row.
 */
static int rss_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_tab *p;
	struct adapter *adap = inode->i_private;

	p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show);
	if (!p)
		return -ENOMEM;

	ret = t4_read_rss(adap, (u16 *)p->data);
	if (ret)
		/* undo the seq_open_tab() allocation on read failure */
		seq_release_private(inode, file);

	return ret;
}
1281
/* debugfs operations for the RSS indirection table dump */
static const struct file_operations rss_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = rss_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private
};
1289
1290/* RSS Configuration.
1291 */
1292
/* Small utility function to return the strings "yes" or "no" if the supplied
 * argument is non-zero.
 */
static const char *yesno(int x)
{
	return x ? "yes" : "no";
}
1303
1304static int rss_config_show(struct seq_file *seq, void *v)
1305{
1306 struct adapter *adapter = seq->private;
1307 static const char * const keymode[] = {
1308 "global",
1309 "global and per-VF scramble",
1310 "per-PF and per-VF scramble",
1311 "per-VF and per-VF scramble",
1312 };
1313 u32 rssconf;
1314
1315 rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_A);
1316 seq_printf(seq, "TP_RSS_CONFIG: %#x\n", rssconf);
1317 seq_printf(seq, " Tnl4TupEnIpv6: %3s\n", yesno(rssconf &
1318 TNL4TUPENIPV6_F));
1319 seq_printf(seq, " Tnl2TupEnIpv6: %3s\n", yesno(rssconf &
1320 TNL2TUPENIPV6_F));
1321 seq_printf(seq, " Tnl4TupEnIpv4: %3s\n", yesno(rssconf &
1322 TNL4TUPENIPV4_F));
1323 seq_printf(seq, " Tnl2TupEnIpv4: %3s\n", yesno(rssconf &
1324 TNL2TUPENIPV4_F));
1325 seq_printf(seq, " TnlTcpSel: %3s\n", yesno(rssconf & TNLTCPSEL_F));
1326 seq_printf(seq, " TnlIp6Sel: %3s\n", yesno(rssconf & TNLIP6SEL_F));
1327 seq_printf(seq, " TnlVrtSel: %3s\n", yesno(rssconf & TNLVRTSEL_F));
1328 seq_printf(seq, " TnlMapEn: %3s\n", yesno(rssconf & TNLMAPEN_F));
1329 seq_printf(seq, " OfdHashSave: %3s\n", yesno(rssconf &
1330 OFDHASHSAVE_F));
1331 seq_printf(seq, " OfdVrtSel: %3s\n", yesno(rssconf & OFDVRTSEL_F));
1332 seq_printf(seq, " OfdMapEn: %3s\n", yesno(rssconf & OFDMAPEN_F));
1333 seq_printf(seq, " OfdLkpEn: %3s\n", yesno(rssconf & OFDLKPEN_F));
1334 seq_printf(seq, " Syn4TupEnIpv6: %3s\n", yesno(rssconf &
1335 SYN4TUPENIPV6_F));
1336 seq_printf(seq, " Syn2TupEnIpv6: %3s\n", yesno(rssconf &
1337 SYN2TUPENIPV6_F));
1338 seq_printf(seq, " Syn4TupEnIpv4: %3s\n", yesno(rssconf &
1339 SYN4TUPENIPV4_F));
1340 seq_printf(seq, " Syn2TupEnIpv4: %3s\n", yesno(rssconf &
1341 SYN2TUPENIPV4_F));
1342 seq_printf(seq, " Syn4TupEnIpv6: %3s\n", yesno(rssconf &
1343 SYN4TUPENIPV6_F));
1344 seq_printf(seq, " SynIp6Sel: %3s\n", yesno(rssconf & SYNIP6SEL_F));
1345 seq_printf(seq, " SynVrt6Sel: %3s\n", yesno(rssconf & SYNVRTSEL_F));
1346 seq_printf(seq, " SynMapEn: %3s\n", yesno(rssconf & SYNMAPEN_F));
1347 seq_printf(seq, " SynLkpEn: %3s\n", yesno(rssconf & SYNLKPEN_F));
1348 seq_printf(seq, " ChnEn: %3s\n", yesno(rssconf &
1349 CHANNELENABLE_F));
1350 seq_printf(seq, " PrtEn: %3s\n", yesno(rssconf &
1351 PORTENABLE_F));
1352 seq_printf(seq, " TnlAllLkp: %3s\n", yesno(rssconf &
1353 TNLALLLOOKUP_F));
1354 seq_printf(seq, " VrtEn: %3s\n", yesno(rssconf &
1355 VIRTENABLE_F));
1356 seq_printf(seq, " CngEn: %3s\n", yesno(rssconf &
1357 CONGESTIONENABLE_F));
1358 seq_printf(seq, " HashToeplitz: %3s\n", yesno(rssconf &
1359 HASHTOEPLITZ_F));
1360 seq_printf(seq, " Udp4En: %3s\n", yesno(rssconf & UDPENABLE_F));
1361 seq_printf(seq, " Disable: %3s\n", yesno(rssconf & DISABLE_F));
1362
1363 seq_puts(seq, "\n");
1364
1365 rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_TNL_A);
1366 seq_printf(seq, "TP_RSS_CONFIG_TNL: %#x\n", rssconf);
1367 seq_printf(seq, " MaskSize: %3d\n", MASKSIZE_G(rssconf));
1368 seq_printf(seq, " MaskFilter: %3d\n", MASKFILTER_G(rssconf));
1369 if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) {
1370 seq_printf(seq, " HashAll: %3s\n",
1371 yesno(rssconf & HASHALL_F));
1372 seq_printf(seq, " HashEth: %3s\n",
1373 yesno(rssconf & HASHETH_F));
1374 }
1375 seq_printf(seq, " UseWireCh: %3s\n", yesno(rssconf & USEWIRECH_F));
1376
1377 seq_puts(seq, "\n");
1378
1379 rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_OFD_A);
1380 seq_printf(seq, "TP_RSS_CONFIG_OFD: %#x\n", rssconf);
1381 seq_printf(seq, " MaskSize: %3d\n", MASKSIZE_G(rssconf));
1382 seq_printf(seq, " RRCplMapEn: %3s\n", yesno(rssconf &
1383 RRCPLMAPEN_F));
1384 seq_printf(seq, " RRCplQueWidth: %3d\n", RRCPLQUEWIDTH_G(rssconf));
1385
1386 seq_puts(seq, "\n");
1387
1388 rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_SYN_A);
1389 seq_printf(seq, "TP_RSS_CONFIG_SYN: %#x\n", rssconf);
1390 seq_printf(seq, " MaskSize: %3d\n", MASKSIZE_G(rssconf));
1391 seq_printf(seq, " UseWireCh: %3s\n", yesno(rssconf & USEWIRECH_F));
1392
1393 seq_puts(seq, "\n");
1394
1395 rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
1396 seq_printf(seq, "TP_RSS_CONFIG_VRT: %#x\n", rssconf);
1397 if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) {
1398 seq_printf(seq, " KeyWrAddrX: %3d\n",
1399 KEYWRADDRX_G(rssconf));
1400 seq_printf(seq, " KeyExtend: %3s\n",
1401 yesno(rssconf & KEYEXTEND_F));
1402 }
1403 seq_printf(seq, " VfRdRg: %3s\n", yesno(rssconf & VFRDRG_F));
1404 seq_printf(seq, " VfRdEn: %3s\n", yesno(rssconf & VFRDEN_F));
1405 seq_printf(seq, " VfPerrEn: %3s\n", yesno(rssconf & VFPERREN_F));
1406 seq_printf(seq, " KeyPerrEn: %3s\n", yesno(rssconf & KEYPERREN_F));
1407 seq_printf(seq, " DisVfVlan: %3s\n", yesno(rssconf &
1408 DISABLEVLAN_F));
1409 seq_printf(seq, " EnUpSwt: %3s\n", yesno(rssconf & ENABLEUP0_F));
1410 seq_printf(seq, " HashDelay: %3d\n", HASHDELAY_G(rssconf));
1411 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
1412 seq_printf(seq, " VfWrAddr: %3d\n", VFWRADDR_G(rssconf));
1413 seq_printf(seq, " KeyMode: %s\n", keymode[KEYMODE_G(rssconf)]);
1414 seq_printf(seq, " VfWrEn: %3s\n", yesno(rssconf & VFWREN_F));
1415 seq_printf(seq, " KeyWrEn: %3s\n", yesno(rssconf & KEYWREN_F));
1416 seq_printf(seq, " KeyWrAddr: %3d\n", KEYWRADDR_G(rssconf));
1417
1418 seq_puts(seq, "\n");
1419
1420 rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_CNG_A);
1421 seq_printf(seq, "TP_RSS_CONFIG_CNG: %#x\n", rssconf);
1422 seq_printf(seq, " ChnCount3: %3s\n", yesno(rssconf & CHNCOUNT3_F));
1423 seq_printf(seq, " ChnCount2: %3s\n", yesno(rssconf & CHNCOUNT2_F));
1424 seq_printf(seq, " ChnCount1: %3s\n", yesno(rssconf & CHNCOUNT1_F));
1425 seq_printf(seq, " ChnCount0: %3s\n", yesno(rssconf & CHNCOUNT0_F));
1426 seq_printf(seq, " ChnUndFlow3: %3s\n", yesno(rssconf &
1427 CHNUNDFLOW3_F));
1428 seq_printf(seq, " ChnUndFlow2: %3s\n", yesno(rssconf &
1429 CHNUNDFLOW2_F));
1430 seq_printf(seq, " ChnUndFlow1: %3s\n", yesno(rssconf &
1431 CHNUNDFLOW1_F));
1432 seq_printf(seq, " ChnUndFlow0: %3s\n", yesno(rssconf &
1433 CHNUNDFLOW0_F));
1434 seq_printf(seq, " RstChn3: %3s\n", yesno(rssconf & RSTCHN3_F));
1435 seq_printf(seq, " RstChn2: %3s\n", yesno(rssconf & RSTCHN2_F));
1436 seq_printf(seq, " RstChn1: %3s\n", yesno(rssconf & RSTCHN1_F));
1437 seq_printf(seq, " RstChn0: %3s\n", yesno(rssconf & RSTCHN0_F));
1438 seq_printf(seq, " UpdVld: %3s\n", yesno(rssconf & UPDVLD_F));
1439 seq_printf(seq, " Xoff: %3s\n", yesno(rssconf & XOFF_F));
1440 seq_printf(seq, " UpdChn3: %3s\n", yesno(rssconf & UPDCHN3_F));
1441 seq_printf(seq, " UpdChn2: %3s\n", yesno(rssconf & UPDCHN2_F));
1442 seq_printf(seq, " UpdChn1: %3s\n", yesno(rssconf & UPDCHN1_F));
1443 seq_printf(seq, " UpdChn0: %3s\n", yesno(rssconf & UPDCHN0_F));
1444 seq_printf(seq, " Queue: %3d\n", QUEUE_G(rssconf));
1445
1446 return 0;
1447}
1448
1449DEFINE_SIMPLE_DEBUGFS_FILE(rss_config);
1450
1451/* RSS Secret Key.
1452 */
1453
1454static int rss_key_show(struct seq_file *seq, void *v)
1455{
1456 u32 key[10];
1457
1458 t4_read_rss_key(seq->private, key);
1459 seq_printf(seq, "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n",
1460 key[9], key[8], key[7], key[6], key[5], key[4], key[3],
1461 key[2], key[1], key[0]);
1462 return 0;
1463}
1464
/* Open the RSS secret key file; i_private is the adapter pointer. */
static int rss_key_open(struct inode *inode, struct file *file)
{
	return single_open(file, rss_key_show, inode->i_private);
}
1469
/* Set the RSS secret key.  Input must be exactly 80 hex digits (optionally
 * followed by whitespace), most-significant word first -- the same format
 * that rss_key_show() produces.
 */
static ssize_t rss_key_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *pos)
{
	int i, j;
	u32 key[10];
	char s[100], *p;
	struct adapter *adap = FILE_DATA(file)->i_private;

	if (count > sizeof(s) - 1)
		return -EINVAL;
	if (copy_from_user(s, buf, count))
		return -EFAULT;
	/* strip trailing whitespace (e.g. the newline from "echo") */
	for (i = count; i > 0 && isspace(s[i - 1]); i--)
		;
	s[i] = '\0';

	/* parse eight hex digits into each of key[9]..key[0] */
	for (p = s, i = 9; i >= 0; i--) {
		key[i] = 0;
		for (j = 0; j < 8; j++, p++) {
			if (!isxdigit(*p))
				return -EINVAL;
			key[i] = (key[i] << 4) | hex2val(*p);
		}
	}

	/* NOTE(review): the -1 index presumably means "don't touch the key
	 * write-address" -- confirm against t4_write_rss_key().
	 */
	t4_write_rss_key(adap, key, -1);
	return count;
}
1498
/* debugfs operations for the RSS secret key (readable and writable) */
static const struct file_operations rss_key_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = rss_key_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = rss_key_write
};
1507
/* PF RSS Configuration.
 */

/* Snapshot of the registers needed to display one PF's RSS configuration.
 * The map and mask registers are global; they are replicated into every
 * entry by rss_pf_config_open() for the display routine's convenience.
 */
struct rss_pf_conf {
	u32 rss_pf_map;		/* TP RSS PF map register (global) */
	u32 rss_pf_mask;	/* TP RSS PF mask register (global) */
	u32 rss_pf_config;	/* this PF's configuration register */
};
1516
/* Display one row of per-PF RSS configuration; the SEQ_START_TOKEN pass
 * dumps the PF Map Index Size and the column headers.
 */
static int rss_pf_config_show(struct seq_file *seq, void *v, int idx)
{
	struct rss_pf_conf *pfconf;

	if (v == SEQ_START_TOKEN) {
		/* use the 0th entry to dump the PF Map Index Size */
		pfconf = seq->private + offsetof(struct seq_tab, data);
		seq_printf(seq, "PF Map Index Size = %d\n\n",
			   LKPIDXSIZE_G(pfconf->rss_pf_map));

		seq_puts(seq, " RSS PF VF Hash Tuple Enable Default\n");
		seq_puts(seq, " Enable IPF Mask Mask IPv6 IPv4 UDP Queue\n");
		seq_puts(seq, " PF Map Chn Prt Map Size Size Four Two Four Two Four Ch1 Ch0\n");
	} else {
	/* extract PF number n's lookup-index / mask-size fields from the
	 * shared map/mask registers (n fields of PF1LKPIDX_S/PF1MSKSIZE_S
	 * bits each)
	 */
	#define G_PFnLKPIDX(map, n) \
		(((map) >> PF1LKPIDX_S*(n)) & PF0LKPIDX_M)
	#define G_PFnMSKSIZE(mask, n) \
		(((mask) >> PF1MSKSIZE_S*(n)) & PF1MSKSIZE_M)

		pfconf = v;
		seq_printf(seq, "%3d %3s %3s %3s %3d %3d %3d %3s %3s %3s %3s %3s %3d %3d\n",
			   idx,
			   yesno(pfconf->rss_pf_config & MAPENABLE_F),
			   yesno(pfconf->rss_pf_config & CHNENABLE_F),
			   yesno(pfconf->rss_pf_config & PRTENABLE_F),
			   G_PFnLKPIDX(pfconf->rss_pf_map, idx),
			   G_PFnMSKSIZE(pfconf->rss_pf_mask, idx),
			   IVFWIDTH_G(pfconf->rss_pf_config),
			   yesno(pfconf->rss_pf_config & IP6FOURTUPEN_F),
			   yesno(pfconf->rss_pf_config & IP6TWOTUPEN_F),
			   yesno(pfconf->rss_pf_config & IP4FOURTUPEN_F),
			   yesno(pfconf->rss_pf_config & IP4TWOTUPEN_F),
			   yesno(pfconf->rss_pf_config & UDPFOURTUPEN_F),
			   CH1DEFAULTQUEUE_G(pfconf->rss_pf_config),
			   CH0DEFAULTQUEUE_G(pfconf->rss_pf_config));

	#undef G_PFnLKPIDX
	#undef G_PFnMSKSIZE
	}
	return 0;
}
1558
/* Snapshot the RSS PF map/mask registers plus all eight per-PF configuration
 * registers into a seq_tab for display by rss_pf_config_show().
 */
static int rss_pf_config_open(struct inode *inode, struct file *file)
{
	struct adapter *adapter = inode->i_private;
	struct seq_tab *p;
	u32 rss_pf_map, rss_pf_mask;
	struct rss_pf_conf *pfconf;
	int pf;

	p = seq_open_tab(file, 8, sizeof(*pfconf), 1, rss_pf_config_show);
	if (!p)
		return -ENOMEM;

	pfconf = (struct rss_pf_conf *)p->data;
	/* the map/mask registers are global: replicate them per entry */
	rss_pf_map = t4_read_rss_pf_map(adapter);
	rss_pf_mask = t4_read_rss_pf_mask(adapter);
	for (pf = 0; pf < 8; pf++) {
		pfconf[pf].rss_pf_map = rss_pf_map;
		pfconf[pf].rss_pf_mask = rss_pf_mask;
		t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config);
	}
	return 0;
}
1581
/* debugfs operations for the per-PF RSS configuration dump */
static const struct file_operations rss_pf_config_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = rss_pf_config_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private
};
1589
/* VF RSS Configuration.
 */

/* Snapshot of one VF's low/high RSS configuration registers, captured by
 * rss_vf_config_open() for display by rss_vf_config_show().
 */
struct rss_vf_conf {
	u32 rss_vf_vfl;		/* low word: secret key hash */
	u32 rss_vf_vfh;		/* high word: enable bits, lookup index, etc. */
};
1597
1598static int rss_vf_config_show(struct seq_file *seq, void *v, int idx)
1599{
1600 if (v == SEQ_START_TOKEN) {
1601 seq_puts(seq, " RSS Hash Tuple Enable\n");
1602 seq_puts(seq, " Enable IVF Dis Enb IPv6 IPv4 UDP Def Secret Key\n");
1603 seq_puts(seq, " VF Chn Prt Map VLAN uP Four Two Four Two Four Que Idx Hash\n");
1604 } else {
1605 struct rss_vf_conf *vfconf = v;
1606
1607 seq_printf(seq, "%3d %3s %3s %3d %3s %3s %3s %3s %3s %3s %3s %4d %3d %#10x\n",
1608 idx,
1609 yesno(vfconf->rss_vf_vfh & VFCHNEN_F),
1610 yesno(vfconf->rss_vf_vfh & VFPRTEN_F),
1611 VFLKPIDX_G(vfconf->rss_vf_vfh),
1612 yesno(vfconf->rss_vf_vfh & VFVLNEX_F),
1613 yesno(vfconf->rss_vf_vfh & VFUPEN_F),
1614 yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F),
1615 yesno(vfconf->rss_vf_vfh & VFIP6TWOTUPEN_F),
1616 yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F),
1617 yesno(vfconf->rss_vf_vfh & VFIP4TWOTUPEN_F),
1618 yesno(vfconf->rss_vf_vfh & ENABLEUDPHASH_F),
1619 DEFAULTQUEUE_G(vfconf->rss_vf_vfh),
1620 KEYINDEX_G(vfconf->rss_vf_vfh),
1621 vfconf->rss_vf_vfl);
1622 }
1623 return 0;
1624}
1625
/* Snapshot all 128 VF RSS configuration register pairs into a seq_tab for
 * display by rss_vf_config_show().
 */
static int rss_vf_config_open(struct inode *inode, struct file *file)
{
	struct adapter *adapter = inode->i_private;
	struct seq_tab *p;
	struct rss_vf_conf *vfconf;
	int vf;

	p = seq_open_tab(file, 128, sizeof(*vfconf), 1, rss_vf_config_show);
	if (!p)
		return -ENOMEM;

	vfconf = (struct rss_vf_conf *)p->data;
	for (vf = 0; vf < 128; vf++) {
		t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
				      &vfconf[vf].rss_vf_vfh);
	}
	return 0;
}
1644
/* debugfs operations for the per-VF RSS configuration dump */
static const struct file_operations rss_vf_config_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = rss_vf_config_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private
};
1652
1653/**
1654 * ethqset2pinfo - return port_info of an Ethernet Queue Set
1655 * @adap: the adapter
1656 * @qset: Ethernet Queue Set
1657 */
1658static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
1659{
1660 int pidx;
1661
1662 for_each_port(adap, pidx) {
1663 struct port_info *pi = adap2pinfo(adap, pidx);
1664
1665 if (qset >= pi->first_qset &&
1666 qset < pi->first_qset + pi->nqsets)
1667 return pi;
1668 }
1669
1670 /* should never happen! */
1671 BUG_ON(1);
1672 return NULL;
1673}
1674
/* Dump SGE queue information.  Each iterator position is one display "row"
 * covering up to four queue sets of one kind; rows come in the fixed order
 * Ethernet, TOE, RDMA-CPL, RDMA-CIQ, Control and finally the single FW
 * event queue.  This layout must match sge_queue_entries().
 */
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
	struct adapter *adap = seq->private;
	int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
	int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
	int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
	int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
	int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
	int i, r = (uintptr_t)v - 1;
	/* row index relative to the start of each successive section;
	 * negative until the corresponding section is reached
	 */
	int toe_idx = r - eth_entries;
	int rdma_idx = toe_idx - toe_entries;
	int ciq_idx = rdma_idx - rdma_entries;
	int ctrl_idx = ciq_idx - ciq_entries;
	int fq_idx = ctrl_idx - ctrl_entries;

	if (r)
		seq_putc(seq, '\n');

/* Helper macros: print a label followed by one column per queue in the
 * current group of n.  S3 is the general form; S, T and R specialize it for
 * strings, Tx queue fields and Rx queue fields respectively.  They rely on
 * the locals i, n, tx and rx being in scope at the expansion site.
 */
#define S3(fmt_spec, s, v) \
do { \
	seq_printf(seq, "%-12s", s); \
	for (i = 0; i < n; ++i) \
		seq_printf(seq, " %16" fmt_spec, v); \
	seq_putc(seq, '\n'); \
} while (0)
#define S(s, v) S3("s", s, v)
#define T(s, v) S3("u", s, tx[i].v)
#define R(s, v) S3("u", s, rx[i].v)

	if (r < eth_entries) {
		int base_qset = r * 4;
		const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset];
		const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset];
		int n = min(4, adap->sge.ethqsets - 4 * r);

		S("QType:", "Ethernet");
		S("Interface:",
		  rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
		T("TxQ ID:", q.cntxt_id);
		T("TxQ size:", q.size);
		T("TxQ inuse:", q.in_use);
		T("TxQ CIDX:", q.cidx);
		T("TxQ PIDX:", q.pidx);
#ifdef CONFIG_CHELSIO_T4_DCB
		T("DCB Prio:", dcb_prio);
		S3("u", "DCB PGID:",
		   (ethqset2pinfo(adap, base_qset + i)->dcb.pgid >>
		    4*(7-tx[i].dcb_prio)) & 0xf);
		S3("u", "DCB PFC:",
		   (ethqset2pinfo(adap, base_qset + i)->dcb.pfcen >>
		    1*(7-tx[i].dcb_prio)) & 0x1);
#endif
		R("RspQ ID:", rspq.abs_id);
		R("RspQ size:", rspq.size);
		R("RspQE size:", rspq.iqe_len);
		R("RspQ CIDX:", rspq.cidx);
		R("RspQ Gen:", rspq.gen);
		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
		S3("u", "Intr pktcnt:",
		   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
		R("FL ID:", fl.cntxt_id);
		R("FL size:", fl.size - 8);
		R("FL pend:", fl.pend_cred);
		R("FL avail:", fl.avail);
		R("FL PIDX:", fl.pidx);
		R("FL CIDX:", fl.cidx);
	} else if (toe_idx < toe_entries) {
		const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4];
		const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4];
		int n = min(4, adap->sge.ofldqsets - 4 * toe_idx);

		S("QType:", "TOE");
		T("TxQ ID:", q.cntxt_id);
		T("TxQ size:", q.size);
		T("TxQ inuse:", q.in_use);
		T("TxQ CIDX:", q.cidx);
		T("TxQ PIDX:", q.pidx);
		R("RspQ ID:", rspq.abs_id);
		R("RspQ size:", rspq.size);
		R("RspQE size:", rspq.iqe_len);
		R("RspQ CIDX:", rspq.cidx);
		R("RspQ Gen:", rspq.gen);
		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
		S3("u", "Intr pktcnt:",
		   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
		R("FL ID:", fl.cntxt_id);
		R("FL size:", fl.size - 8);
		R("FL pend:", fl.pend_cred);
		R("FL avail:", fl.avail);
		R("FL PIDX:", fl.pidx);
		R("FL CIDX:", fl.cidx);
	} else if (rdma_idx < rdma_entries) {
		const struct sge_ofld_rxq *rx =
				&adap->sge.rdmarxq[rdma_idx * 4];
		int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx);

		S("QType:", "RDMA-CPL");
		R("RspQ ID:", rspq.abs_id);
		R("RspQ size:", rspq.size);
		R("RspQE size:", rspq.iqe_len);
		R("RspQ CIDX:", rspq.cidx);
		R("RspQ Gen:", rspq.gen);
		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
		S3("u", "Intr pktcnt:",
		   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
		R("FL ID:", fl.cntxt_id);
		R("FL size:", fl.size - 8);
		R("FL pend:", fl.pend_cred);
		R("FL avail:", fl.avail);
		R("FL PIDX:", fl.pidx);
		R("FL CIDX:", fl.cidx);
	} else if (ciq_idx < ciq_entries) {
		const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
		int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);

		S("QType:", "RDMA-CIQ");
		R("RspQ ID:", rspq.abs_id);
		R("RspQ size:", rspq.size);
		R("RspQE size:", rspq.iqe_len);
		R("RspQ CIDX:", rspq.cidx);
		R("RspQ Gen:", rspq.gen);
		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
		S3("u", "Intr pktcnt:",
		   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
	} else if (ctrl_idx < ctrl_entries) {
		const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
		int n = min(4, adap->params.nports - 4 * ctrl_idx);

		S("QType:", "Control");
		T("TxQ ID:", q.cntxt_id);
		T("TxQ size:", q.size);
		T("TxQ inuse:", q.in_use);
		T("TxQ CIDX:", q.cidx);
		T("TxQ PIDX:", q.pidx);
	} else if (fq_idx == 0) {
		const struct sge_rspq *evtq = &adap->sge.fw_evtq;

		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
		seq_printf(seq, "%-12s %16u\n", "RspQ size:", evtq->size);
		seq_printf(seq, "%-12s %16u\n", "RspQE size:", evtq->iqe_len);
		seq_printf(seq, "%-12s %16u\n", "RspQ CIDX:", evtq->cidx);
		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
			   qtimer_val(adap, evtq));
		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
			   adap->sge.counter_val[evtq->pktcnt_idx]);
	}
#undef R
#undef T
#undef S
#undef S3
	return 0;
}
1829
1830static int sge_queue_entries(const struct adapter *adap)
1831{
1832 return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
1833 DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
1834 DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
1835 DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
1836 DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
1837}
1838
1839static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
1840{
1841 int entries = sge_queue_entries(seq->private);
1842
1843 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1844}
1845
/* seq_file stop: nothing to release per-iteration. */
static void sge_queue_stop(struct seq_file *seq, void *v)
{
}
1849
1850static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
1851{
1852 int entries = sge_queue_entries(seq->private);
1853
1854 ++*pos;
1855 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1856}
1857
/* Iterator for the SGE queue information seq_file */
static const struct seq_operations sge_qinfo_seq_ops = {
	.start = sge_queue_start,
	.next = sge_queue_next,
	.stop = sge_queue_stop,
	.show = sge_qinfo_show
};
1864
1865static int sge_qinfo_open(struct inode *inode, struct file *file)
1866{
1867 int res = seq_open(file, &sge_qinfo_seq_ops);
1868
1869 if (!res) {
1870 struct seq_file *seq = file->private_data;
1871
1872 seq->private = inode->i_private;
1873 }
1874 return res;
1875}
1876
/* debugfs operations for the SGE queue information dump */
static const struct file_operations sge_qinfo_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = sge_qinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
1884
/* Common open routine for adapter-memory style debugfs files.  The low two
 * bits of the i_private cookie select which memory the file refers to; the
 * remaining bits are the adapter pointer (the tagged cookie is stored in
 * file->private_data for the read routines).  Always succeeds.
 */
int mem_open(struct inode *inode, struct file *file)
{
	unsigned int mem;
	struct adapter *adap;

	file->private_data = inode->i_private;

	/* strip the low-bits memory tag to recover the adapter pointer */
	mem = (uintptr_t)file->private_data & 0x3;
	adap = file->private_data - mem;

	/* Flush the firmware's memory cache so subsequent reads observe
	 * current contents; best-effort, failure deliberately ignored.
	 */
	(void)t4_fwcache(adap, FW_PARAM_DEV_FWCACHE_FLUSH);

	return 0;
}
1899
46static ssize_t mem_read(struct file *file, char __user *buf, size_t count, 1900static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
47 loff_t *ppos) 1901 loff_t *ppos)
48{ 1902{
@@ -80,7 +1934,6 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
80 *ppos = pos + count; 1934 *ppos = pos + count;
81 return count; 1935 return count;
82} 1936}
83
84static const struct file_operations mem_debugfs_fops = { 1937static const struct file_operations mem_debugfs_fops = {
85 .owner = THIS_MODULE, 1938 .owner = THIS_MODULE,
86 .open = simple_open, 1939 .open = simple_open,
@@ -88,6 +1941,12 @@ static const struct file_operations mem_debugfs_fops = {
88 .llseek = default_llseek, 1941 .llseek = default_llseek,
89}; 1942};
90 1943
1944static void set_debugfs_file_size(struct dentry *de, loff_t size)
1945{
1946 if (!IS_ERR(de) && de->d_inode)
1947 de->d_inode->i_size = size;
1948}
1949
91static void add_debugfs_mem(struct adapter *adap, const char *name, 1950static void add_debugfs_mem(struct adapter *adap, const char *name,
92 unsigned int idx, unsigned int size_mb) 1951 unsigned int idx, unsigned int size_mb)
93{ 1952{
@@ -119,14 +1978,65 @@ int t4_setup_debugfs(struct adapter *adap)
119{ 1978{
120 int i; 1979 int i;
121 u32 size; 1980 u32 size;
1981 struct dentry *de;
122 1982
123 static struct t4_debugfs_entry t4_debugfs_files[] = { 1983 static struct t4_debugfs_entry t4_debugfs_files[] = {
1984 { "cim_la", &cim_la_fops, S_IRUSR, 0 },
1985 { "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
1986 { "clk", &clk_debugfs_fops, S_IRUSR, 0 },
1987 { "devlog", &devlog_fops, S_IRUSR, 0 },
1988 { "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
1989 { "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
1990 { "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
1991 { "mbox3", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 3 },
1992 { "mbox4", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 4 },
1993 { "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 },
1994 { "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 },
1995 { "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 },
124 { "l2t", &t4_l2t_fops, S_IRUSR, 0}, 1996 { "l2t", &t4_l2t_fops, S_IRUSR, 0},
1997 { "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 },
1998 { "rss", &rss_debugfs_fops, S_IRUSR, 0 },
1999 { "rss_config", &rss_config_debugfs_fops, S_IRUSR, 0 },
2000 { "rss_key", &rss_key_debugfs_fops, S_IRUSR, 0 },
2001 { "rss_pf_config", &rss_pf_config_debugfs_fops, S_IRUSR, 0 },
2002 { "rss_vf_config", &rss_vf_config_debugfs_fops, S_IRUSR, 0 },
2003 { "sge_qinfo", &sge_qinfo_debugfs_fops, S_IRUSR, 0 },
2004 { "ibq_tp0", &cim_ibq_fops, S_IRUSR, 0 },
2005 { "ibq_tp1", &cim_ibq_fops, S_IRUSR, 1 },
2006 { "ibq_ulp", &cim_ibq_fops, S_IRUSR, 2 },
2007 { "ibq_sge0", &cim_ibq_fops, S_IRUSR, 3 },
2008 { "ibq_sge1", &cim_ibq_fops, S_IRUSR, 4 },
2009 { "ibq_ncsi", &cim_ibq_fops, S_IRUSR, 5 },
2010 { "obq_ulp0", &cim_obq_fops, S_IRUSR, 0 },
2011 { "obq_ulp1", &cim_obq_fops, S_IRUSR, 1 },
2012 { "obq_ulp2", &cim_obq_fops, S_IRUSR, 2 },
2013 { "obq_ulp3", &cim_obq_fops, S_IRUSR, 3 },
2014 { "obq_sge", &cim_obq_fops, S_IRUSR, 4 },
2015 { "obq_ncsi", &cim_obq_fops, S_IRUSR, 5 },
2016 { "tp_la", &tp_la_fops, S_IRUSR, 0 },
2017 { "ulprx_la", &ulprx_la_fops, S_IRUSR, 0 },
2018 { "sensors", &sensors_debugfs_fops, S_IRUSR, 0 },
2019 { "pm_stats", &pm_stats_debugfs_fops, S_IRUSR, 0 },
2020 { "cctrl", &cctrl_tbl_debugfs_fops, S_IRUSR, 0 },
2021#if IS_ENABLED(CONFIG_IPV6)
2022 { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
2023#endif
2024 };
2025
2026 /* Debug FS nodes common to all T5 and later adapters.
2027 */
2028 static struct t4_debugfs_entry t5_debugfs_files[] = {
2029 { "obq_sge_rx_q0", &cim_obq_fops, S_IRUSR, 6 },
2030 { "obq_sge_rx_q1", &cim_obq_fops, S_IRUSR, 7 },
125 }; 2031 };
126 2032
127 add_debugfs_files(adap, 2033 add_debugfs_files(adap,
128 t4_debugfs_files, 2034 t4_debugfs_files,
129 ARRAY_SIZE(t4_debugfs_files)); 2035 ARRAY_SIZE(t4_debugfs_files));
2036 if (!is_t4(adap->params.chip))
2037 add_debugfs_files(adap,
2038 t5_debugfs_files,
2039 ARRAY_SIZE(t5_debugfs_files));
130 2040
131 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); 2041 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
132 if (i & EDRAM0_ENABLE_F) { 2042 if (i & EDRAM0_ENABLE_F) {
@@ -154,5 +2064,10 @@ int t4_setup_debugfs(struct adapter *adap)
154 EXT_MEM1_SIZE_G(size)); 2064 EXT_MEM1_SIZE_G(size));
155 } 2065 }
156 } 2066 }
2067
2068 de = debugfs_create_file("flash", S_IRUSR, adap->debugfs_root, adap,
2069 &flash_debugfs_fops);
2070 set_debugfs_file_size(de, adap->params.sf_size);
2071
157 return 0; 2072 return 0;
158} 2073}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
index a3d8867efd3d..b63cfee2d963 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
@@ -37,6 +37,21 @@
37 37
38#include <linux/export.h> 38#include <linux/export.h>
39 39
40#define FILE_DATA(_file) ((_file)->f_path.dentry->d_inode)
41
42#define DEFINE_SIMPLE_DEBUGFS_FILE(name) \
43static int name##_open(struct inode *inode, struct file *file) \
44{ \
45 return single_open(file, name##_show, inode->i_private); \
46} \
47static const struct file_operations name##_debugfs_fops = { \
48 .owner = THIS_MODULE, \
49 .open = name##_open, \
50 .read = seq_read, \
51 .llseek = seq_lseek, \
52 .release = single_release \
53}
54
40struct t4_debugfs_entry { 55struct t4_debugfs_entry {
41 const char *name; 56 const char *name;
42 const struct file_operations *ops; 57 const struct file_operations *ops;
@@ -44,9 +59,27 @@ struct t4_debugfs_entry {
44 unsigned char data; 59 unsigned char data;
45}; 60};
46 61
/* Descriptor for a fixed-width table rendered through seq_file: @show
 * formats a single row, and the raw row storage follows the struct
 * inline in @data (allocated by seq_open_tab(), declared below).
 */
struct seq_tab {
	int (*show)(struct seq_file *seq, void *v, int idx);
	unsigned int rows;        /* # of entries */
	unsigned char width;      /* size in bytes of each entry */
	unsigned char skip_first; /* whether the first line is a header */
	char data[0];             /* the table data */
};
69
/* Convert one hexadecimal digit character to its numeric value (0..15).
 * The caller must pass a valid hex digit ([0-9a-fA-F]); any other input
 * yields an unspecified value.
 */
static inline unsigned int hex2val(char c)
{
	/* Go through unsigned char before the ctype calls: handing a
	 * negative plain-char value to isdigit()/tolower() is undefined
	 * behavior per the C standard (CERT STR37-C).
	 */
	unsigned char uc = c;

	return isdigit(uc) ? uc - '0' : tolower(uc) - 'a' + 10;
}
74
75struct seq_tab *seq_open_tab(struct file *f, unsigned int rows,
76 unsigned int width, unsigned int have_header,
77 int (*show)(struct seq_file *seq, void *v, int i));
78
47int t4_setup_debugfs(struct adapter *adap); 79int t4_setup_debugfs(struct adapter *adap);
48void add_debugfs_files(struct adapter *adap, 80void add_debugfs_files(struct adapter *adap,
49 struct t4_debugfs_entry *files, 81 struct t4_debugfs_entry *files,
50 unsigned int nfiles); 82 unsigned int nfiles);
83int mem_open(struct inode *inode, struct file *file);
51 84
52#endif 85#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index ccf3436024bc..a22cf932ca35 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -62,14 +62,18 @@
62#include <net/netevent.h> 62#include <net/netevent.h>
63#include <net/addrconf.h> 63#include <net/addrconf.h>
64#include <net/bonding.h> 64#include <net/bonding.h>
65#include <net/addrconf.h>
65#include <asm/uaccess.h> 66#include <asm/uaccess.h>
66 67
67#include "cxgb4.h" 68#include "cxgb4.h"
68#include "t4_regs.h" 69#include "t4_regs.h"
70#include "t4_values.h"
69#include "t4_msg.h" 71#include "t4_msg.h"
70#include "t4fw_api.h" 72#include "t4fw_api.h"
73#include "t4fw_version.h"
71#include "cxgb4_dcb.h" 74#include "cxgb4_dcb.h"
72#include "cxgb4_debugfs.h" 75#include "cxgb4_debugfs.h"
76#include "clip_tbl.h"
73#include "l2t.h" 77#include "l2t.h"
74 78
75#ifdef DRV_VERSION 79#ifdef DRV_VERSION
@@ -78,99 +82,6 @@
78#define DRV_VERSION "2.0.0-ko" 82#define DRV_VERSION "2.0.0-ko"
79#define DRV_DESC "Chelsio T4/T5 Network Driver" 83#define DRV_DESC "Chelsio T4/T5 Network Driver"
80 84
81/*
82 * Max interrupt hold-off timer value in us. Queues fall back to this value
83 * under extreme memory pressure so it's largish to give the system time to
84 * recover.
85 */
86#define MAX_SGE_TIMERVAL 200U
87
88enum {
89 /*
90 * Physical Function provisioning constants.
91 */
92 PFRES_NVI = 4, /* # of Virtual Interfaces */
93 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
94 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
95 */
96 PFRES_NEQ = 256, /* # of egress queues */
97 PFRES_NIQ = 0, /* # of ingress queues */
98 PFRES_TC = 0, /* PCI-E traffic class */
99 PFRES_NEXACTF = 128, /* # of exact MPS filters */
100
101 PFRES_R_CAPS = FW_CMD_CAP_PF,
102 PFRES_WX_CAPS = FW_CMD_CAP_PF,
103
104#ifdef CONFIG_PCI_IOV
105 /*
106 * Virtual Function provisioning constants. We need two extra Ingress
107 * Queues with Interrupt capability to serve as the VF's Firmware
108 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
109 * neither will have Free Lists associated with them). For each
110 * Ethernet/Control Egress Queue and for each Free List, we need an
111 * Egress Context.
112 */
113 VFRES_NPORTS = 1, /* # of "ports" per VF */
114 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
115
116 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
117 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
118 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
119 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
120 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
121 VFRES_TC = 0, /* PCI-E traffic class */
122 VFRES_NEXACTF = 16, /* # of exact MPS filters */
123
124 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
125 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
126#endif
127};
128
129/*
130 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
131 * static and likely not to be useful in the long run. We really need to
132 * implement some form of persistent configuration which the firmware
133 * controls.
134 */
135static unsigned int pfvfres_pmask(struct adapter *adapter,
136 unsigned int pf, unsigned int vf)
137{
138 unsigned int portn, portvec;
139
140 /*
141 * Give PF's access to all of the ports.
142 */
143 if (vf == 0)
144 return FW_PFVF_CMD_PMASK_M;
145
146 /*
147 * For VFs, we'll assign them access to the ports based purely on the
148 * PF. We assign active ports in order, wrapping around if there are
149 * fewer active ports than PFs: e.g. active port[pf % nports].
150 * Unfortunately the adapter's port_info structs haven't been
151 * initialized yet so we have to compute this.
152 */
153 if (adapter->params.nports == 0)
154 return 0;
155
156 portn = pf % adapter->params.nports;
157 portvec = adapter->params.portvec;
158 for (;;) {
159 /*
160 * Isolate the lowest set bit in the port vector. If we're at
161 * the port number that we want, return that as the pmask.
162 * otherwise mask that bit out of the port vector and
163 * decrement our port number ...
164 */
165 unsigned int pmask = portvec ^ (portvec & (portvec-1));
166 if (portn == 0)
167 return pmask;
168 portn--;
169 portvec &= ~pmask;
170 }
171 /*NOTREACHED*/
172}
173
174enum { 85enum {
175 MAX_TXQ_ENTRIES = 16384, 86 MAX_TXQ_ENTRIES = 16384,
176 MAX_CTRL_TXQ_ENTRIES = 1024, 87 MAX_CTRL_TXQ_ENTRIES = 1024,
@@ -263,7 +174,8 @@ MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter")
263static uint force_old_init; 174static uint force_old_init;
264 175
265module_param(force_old_init, uint, 0644); 176module_param(force_old_init, uint, 0644);
266MODULE_PARM_DESC(force_old_init, "Force old initialization sequence"); 177MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
178 " parameter");
267 179
268static int dflt_msg_enable = DFLT_MSG_ENABLE; 180static int dflt_msg_enable = DFLT_MSG_ENABLE;
269 181
@@ -292,13 +204,14 @@ static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
292 204
293module_param_array(intr_holdoff, uint, NULL, 0644); 205module_param_array(intr_holdoff, uint, NULL, 0644);
294MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers " 206MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
295 "0..4 in microseconds"); 207 "0..4 in microseconds, deprecated parameter");
296 208
297static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 }; 209static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
298 210
299module_param_array(intr_cnt, uint, NULL, 0644); 211module_param_array(intr_cnt, uint, NULL, 0644);
300MODULE_PARM_DESC(intr_cnt, 212MODULE_PARM_DESC(intr_cnt,
301 "thresholds 1..3 for queue interrupt packet counters"); 213 "thresholds 1..3 for queue interrupt packet counters, "
214 "deprecated parameter");
302 215
303/* 216/*
304 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers 217 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
@@ -318,7 +231,8 @@ static bool vf_acls;
318 231
319#ifdef CONFIG_PCI_IOV 232#ifdef CONFIG_PCI_IOV
320module_param(vf_acls, bool, 0644); 233module_param(vf_acls, bool, 0644);
321MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement"); 234MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
235 "deprecated parameter");
322 236
323/* Configure the number of PCI-E Virtual Function which are to be instantiated 237/* Configure the number of PCI-E Virtual Function which are to be instantiated
324 * on SR-IOV Capable Physical Functions. 238 * on SR-IOV Capable Physical Functions.
@@ -340,32 +254,11 @@ module_param(select_queue, int, 0644);
340MODULE_PARM_DESC(select_queue, 254MODULE_PARM_DESC(select_queue,
341 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method."); 255 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
342 256
343/* 257static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;
344 * The filter TCAM has a fixed portion and a variable portion. The fixed
345 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
346 * ports. The variable portion is 36 bits which can include things like Exact
347 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
348 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
349 * far exceed the 36-bit budget for this "compressed" header portion of the
350 * filter. Thus, we have a scarce resource which must be carefully managed.
351 *
352 * By default we set this up to mostly match the set of filter matching
353 * capabilities of T3 but with accommodations for some of T4's more
354 * interesting features:
355 *
356 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
357 * [Inner] VLAN (17), Port (3), FCoE (1) }
358 */
359enum {
360 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
361 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
362 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
363};
364
365static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
366 258
367module_param(tp_vlan_pri_map, uint, 0644); 259module_param(tp_vlan_pri_map, uint, 0644);
368MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration"); 260MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
261 "deprecated parameter");
369 262
370static struct dentry *cxgb4_debugfs_root; 263static struct dentry *cxgb4_debugfs_root;
371 264
@@ -671,7 +564,7 @@ static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
671 if (idx >= adap->tids.ftid_base && nidx < 564 if (idx >= adap->tids.ftid_base && nidx <
672 (adap->tids.nftids + adap->tids.nsftids)) { 565 (adap->tids.nftids + adap->tids.nsftids)) {
673 idx = nidx; 566 idx = nidx;
674 ret = GET_TCB_COOKIE(rpl->cookie); 567 ret = TCB_COOKIE_G(rpl->cookie);
675 f = &adap->tids.ftid_tab[idx]; 568 f = &adap->tids.ftid_tab[idx];
676 569
677 if (ret == FW_FILTER_WR_FLT_DELETED) { 570 if (ret == FW_FILTER_WR_FLT_DELETED) {
@@ -723,7 +616,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
723 616
724 if (likely(opcode == CPL_SGE_EGR_UPDATE)) { 617 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
725 const struct cpl_sge_egr_update *p = (void *)rsp; 618 const struct cpl_sge_egr_update *p = (void *)rsp;
726 unsigned int qid = EGR_QID(ntohl(p->opcode_qid)); 619 unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
727 struct sge_txq *txq; 620 struct sge_txq *txq;
728 621
729 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; 622 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
@@ -833,11 +726,11 @@ static void disable_msi(struct adapter *adapter)
833static irqreturn_t t4_nondata_intr(int irq, void *cookie) 726static irqreturn_t t4_nondata_intr(int irq, void *cookie)
834{ 727{
835 struct adapter *adap = cookie; 728 struct adapter *adap = cookie;
729 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
836 730
837 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE)); 731 if (v & PFSW_F) {
838 if (v & PFSW) {
839 adap->swintr = 1; 732 adap->swintr = 1;
840 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v); 733 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
841 } 734 }
842 t4_slow_intr_handler(adap); 735 t4_slow_intr_handler(adap);
843 return IRQ_HANDLED; 736 return IRQ_HANDLED;
@@ -1030,8 +923,14 @@ static void quiesce_rx(struct adapter *adap)
1030 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { 923 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1031 struct sge_rspq *q = adap->sge.ingr_map[i]; 924 struct sge_rspq *q = adap->sge.ingr_map[i];
1032 925
1033 if (q && q->handler) 926 if (q && q->handler) {
1034 napi_disable(&q->napi); 927 napi_disable(&q->napi);
928 local_bh_disable();
929 while (!cxgb_poll_lock_napi(q))
930 mdelay(1);
931 local_bh_enable();
932 }
933
1035 } 934 }
1036} 935}
1037 936
@@ -1047,12 +946,14 @@ static void enable_rx(struct adapter *adap)
1047 946
1048 if (!q) 947 if (!q)
1049 continue; 948 continue;
1050 if (q->handler) 949 if (q->handler) {
950 cxgb_busy_poll_init_lock(q);
1051 napi_enable(&q->napi); 951 napi_enable(&q->napi);
952 }
1052 /* 0-increment GTS to start the timer and enable interrupts */ 953 /* 0-increment GTS to start the timer and enable interrupts */
1053 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), 954 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
1054 SEINTARM(q->intr_params) | 955 SEINTARM_V(q->intr_params) |
1055 INGRESSQID(q->cntxt_id)); 956 INGRESSQID_V(q->cntxt_id));
1056 } 957 }
1057} 958}
1058 959
@@ -1176,10 +1077,10 @@ freeout: t4_free_sge_resources(adap);
1176 } 1077 }
1177 1078
1178 t4_write_reg(adap, is_t4(adap->params.chip) ? 1079 t4_write_reg(adap, is_t4(adap->params.chip) ?
1179 MPS_TRC_RSS_CONTROL : 1080 MPS_TRC_RSS_CONTROL_A :
1180 MPS_T5_TRC_RSS_CONTROL, 1081 MPS_T5_TRC_RSS_CONTROL_A,
1181 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | 1082 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
1182 QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); 1083 QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
1183 return 0; 1084 return 0;
1184} 1085}
1185 1086
@@ -1518,6 +1419,7 @@ static int get_eeprom_len(struct net_device *dev)
1518static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1419static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1519{ 1420{
1520 struct adapter *adapter = netdev2adap(dev); 1421 struct adapter *adapter = netdev2adap(dev);
1422 u32 exprom_vers;
1521 1423
1522 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 1424 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1523 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 1425 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
@@ -1535,6 +1437,14 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1535 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), 1437 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
1536 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), 1438 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
1537 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); 1439 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
1440
1441 if (!t4_get_exprom_version(adapter, &exprom_vers))
1442 snprintf(info->erom_version, sizeof(info->erom_version),
1443 "%u.%u.%u.%u",
1444 FW_HDR_FW_VER_MAJOR_G(exprom_vers),
1445 FW_HDR_FW_VER_MINOR_G(exprom_vers),
1446 FW_HDR_FW_VER_MICRO_G(exprom_vers),
1447 FW_HDR_FW_VER_BUILD_G(exprom_vers));
1538} 1448}
1539 1449
1540static void get_strings(struct net_device *dev, u32 stringset, u8 *data) 1450static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -1589,9 +1499,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1589 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); 1499 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1590 data += sizeof(struct queue_port_stats) / sizeof(u64); 1500 data += sizeof(struct queue_port_stats) / sizeof(u64);
1591 if (!is_t4(adapter->params.chip)) { 1501 if (!is_t4(adapter->params.chip)) {
1592 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7)); 1502 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
1593 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL); 1503 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
1594 val2 = t4_read_reg(adapter, SGE_STAT_MATCH); 1504 val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
1595 *data = val1 - val2; 1505 *data = val1 - val2;
1596 data++; 1506 data++;
1597 *data = val2; 1507 *data = val2;
@@ -2608,8 +2518,8 @@ static int closest_thres(const struct sge *s, int thres)
2608/* 2518/*
2609 * Return a queue's interrupt hold-off time in us. 0 means no timer. 2519 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2610 */ 2520 */
2611static unsigned int qtimer_val(const struct adapter *adap, 2521unsigned int qtimer_val(const struct adapter *adap,
2612 const struct sge_rspq *q) 2522 const struct sge_rspq *q)
2613{ 2523{
2614 unsigned int idx = q->intr_params >> 1; 2524 unsigned int idx = q->intr_params >> 1;
2615 2525
@@ -3346,40 +3256,6 @@ static int tid_init(struct tid_info *t)
3346 return 0; 3256 return 0;
3347} 3257}
3348 3258
3349int cxgb4_clip_get(const struct net_device *dev,
3350 const struct in6_addr *lip)
3351{
3352 struct adapter *adap;
3353 struct fw_clip_cmd c;
3354
3355 adap = netdev2adap(dev);
3356 memset(&c, 0, sizeof(c));
3357 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3358 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3359 c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
3360 c.ip_hi = *(__be64 *)(lip->s6_addr);
3361 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3362 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3363}
3364EXPORT_SYMBOL(cxgb4_clip_get);
3365
3366int cxgb4_clip_release(const struct net_device *dev,
3367 const struct in6_addr *lip)
3368{
3369 struct adapter *adap;
3370 struct fw_clip_cmd c;
3371
3372 adap = netdev2adap(dev);
3373 memset(&c, 0, sizeof(c));
3374 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3375 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3376 c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
3377 c.ip_hi = *(__be64 *)(lip->s6_addr);
3378 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3379 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3380}
3381EXPORT_SYMBOL(cxgb4_clip_release);
3382
3383/** 3259/**
3384 * cxgb4_create_server - create an IP server 3260 * cxgb4_create_server - create an IP server
3385 * @dev: the device 3261 * @dev: the device
@@ -3415,8 +3291,8 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3415 req->peer_ip = htonl(0); 3291 req->peer_ip = htonl(0);
3416 chan = rxq_to_chan(&adap->sge, queue); 3292 chan = rxq_to_chan(&adap->sge, queue);
3417 req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); 3293 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
3418 req->opt1 = cpu_to_be64(CONN_POLICY_ASK | 3294 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
3419 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); 3295 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
3420 ret = t4_mgmt_tx(adap, skb); 3296 ret = t4_mgmt_tx(adap, skb);
3421 return net_xmit_eval(ret); 3297 return net_xmit_eval(ret);
3422} 3298}
@@ -3458,8 +3334,8 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3458 req->peer_ip_lo = cpu_to_be64(0); 3334 req->peer_ip_lo = cpu_to_be64(0);
3459 chan = rxq_to_chan(&adap->sge, queue); 3335 chan = rxq_to_chan(&adap->sge, queue);
3460 req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); 3336 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
3461 req->opt1 = cpu_to_be64(CONN_POLICY_ASK | 3337 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
3462 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); 3338 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
3463 ret = t4_mgmt_tx(adap, skb); 3339 ret = t4_mgmt_tx(adap, skb);
3464 return net_xmit_eval(ret); 3340 return net_xmit_eval(ret);
3465} 3341}
@@ -3482,8 +3358,8 @@ int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3482 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req)); 3358 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3483 INIT_TP_WR(req, 0); 3359 INIT_TP_WR(req, 0);
3484 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid)); 3360 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3485 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) : 3361 req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
3486 LISTSVR_IPV6(0)) | QUEUENO(queue)); 3362 LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
3487 ret = t4_mgmt_tx(adap, skb); 3363 ret = t4_mgmt_tx(adap, skb);
3488 return net_xmit_eval(ret); 3364 return net_xmit_eval(ret);
3489} 3365}
@@ -3600,14 +3476,14 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3600 struct adapter *adap = netdev2adap(dev); 3476 struct adapter *adap = netdev2adap(dev);
3601 u32 v1, v2, lp_count, hp_count; 3477 u32 v1, v2, lp_count, hp_count;
3602 3478
3603 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); 3479 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
3604 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); 3480 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
3605 if (is_t4(adap->params.chip)) { 3481 if (is_t4(adap->params.chip)) {
3606 lp_count = G_LP_COUNT(v1); 3482 lp_count = LP_COUNT_G(v1);
3607 hp_count = G_HP_COUNT(v1); 3483 hp_count = HP_COUNT_G(v1);
3608 } else { 3484 } else {
3609 lp_count = G_LP_COUNT_T5(v1); 3485 lp_count = LP_COUNT_T5_G(v1);
3610 hp_count = G_HP_COUNT_T5(v2); 3486 hp_count = HP_COUNT_T5_G(v2);
3611 } 3487 }
3612 return lpfifo ? lp_count : hp_count; 3488 return lpfifo ? lp_count : hp_count;
3613} 3489}
@@ -3653,10 +3529,10 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3653{ 3529{
3654 struct adapter *adap = netdev2adap(dev); 3530 struct adapter *adap = netdev2adap(dev);
3655 3531
3656 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask); 3532 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
3657 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) | 3533 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
3658 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) | 3534 HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
3659 HPZ3(pgsz_order[3])); 3535 HPZ3_V(pgsz_order[3]));
3660} 3536}
3661EXPORT_SYMBOL(cxgb4_iscsi_init); 3537EXPORT_SYMBOL(cxgb4_iscsi_init);
3662 3538
@@ -3666,14 +3542,14 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
3666 int ret; 3542 int ret;
3667 3543
3668 ret = t4_fwaddrspace_write(adap, adap->mbox, 3544 ret = t4_fwaddrspace_write(adap, adap->mbox,
3669 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000); 3545 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
3670 return ret; 3546 return ret;
3671} 3547}
3672EXPORT_SYMBOL(cxgb4_flush_eq_cache); 3548EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3673 3549
3674static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) 3550static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3675{ 3551{
3676 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8; 3552 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
3677 __be64 indices; 3553 __be64 indices;
3678 int ret; 3554 int ret;
3679 3555
@@ -3702,14 +3578,20 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3702 3578
3703 if (pidx != hw_pidx) { 3579 if (pidx != hw_pidx) {
3704 u16 delta; 3580 u16 delta;
3581 u32 val;
3705 3582
3706 if (pidx >= hw_pidx) 3583 if (pidx >= hw_pidx)
3707 delta = pidx - hw_pidx; 3584 delta = pidx - hw_pidx;
3708 else 3585 else
3709 delta = size - hw_pidx + pidx; 3586 delta = size - hw_pidx + pidx;
3587
3588 if (is_t4(adap->params.chip))
3589 val = PIDX_V(delta);
3590 else
3591 val = PIDX_T5_V(delta);
3710 wmb(); 3592 wmb();
3711 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 3593 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
3712 QID(qid) | PIDX(delta)); 3594 QID_V(qid) | val);
3713 } 3595 }
3714out: 3596out:
3715 return ret; 3597 return ret;
@@ -3721,8 +3603,8 @@ void cxgb4_disable_db_coalescing(struct net_device *dev)
3721 struct adapter *adap; 3603 struct adapter *adap;
3722 3604
3723 adap = netdev2adap(dev); 3605 adap = netdev2adap(dev);
3724 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 3606 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
3725 F_NOCOALESCE); 3607 NOCOALESCE_F);
3726} 3608}
3727EXPORT_SYMBOL(cxgb4_disable_db_coalescing); 3609EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3728 3610
@@ -3731,7 +3613,7 @@ void cxgb4_enable_db_coalescing(struct net_device *dev)
3731 struct adapter *adap; 3613 struct adapter *adap;
3732 3614
3733 adap = netdev2adap(dev); 3615 adap = netdev2adap(dev);
3734 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0); 3616 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
3735} 3617}
3736EXPORT_SYMBOL(cxgb4_enable_db_coalescing); 3618EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3737 3619
@@ -3809,8 +3691,8 @@ u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3809 struct adapter *adap; 3691 struct adapter *adap;
3810 3692
3811 adap = netdev2adap(dev); 3693 adap = netdev2adap(dev);
3812 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO); 3694 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
3813 hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI)); 3695 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
3814 3696
3815 return ((u64)hi << 32) | (u64)lo; 3697 return ((u64)hi << 32) | (u64)lo;
3816} 3698}
@@ -3870,14 +3752,14 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
3870 u32 v1, v2, lp_count, hp_count; 3752 u32 v1, v2, lp_count, hp_count;
3871 3753
3872 do { 3754 do {
3873 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); 3755 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
3874 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); 3756 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
3875 if (is_t4(adap->params.chip)) { 3757 if (is_t4(adap->params.chip)) {
3876 lp_count = G_LP_COUNT(v1); 3758 lp_count = LP_COUNT_G(v1);
3877 hp_count = G_HP_COUNT(v1); 3759 hp_count = HP_COUNT_G(v1);
3878 } else { 3760 } else {
3879 lp_count = G_LP_COUNT_T5(v1); 3761 lp_count = LP_COUNT_T5_G(v1);
3880 hp_count = G_HP_COUNT_T5(v2); 3762 hp_count = HP_COUNT_T5_G(v2);
3881 } 3763 }
3882 3764
3883 if (lp_count == 0 && hp_count == 0) 3765 if (lp_count == 0 && hp_count == 0)
@@ -3904,8 +3786,8 @@ static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3904 * are committed before we tell HW about them. 3786 * are committed before we tell HW about them.
3905 */ 3787 */
3906 wmb(); 3788 wmb();
3907 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 3789 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
3908 QID(q->cntxt_id) | PIDX(q->db_pidx_inc)); 3790 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
3909 q->db_pidx_inc = 0; 3791 q->db_pidx_inc = 0;
3910 } 3792 }
3911 q->db_disabled = 0; 3793 q->db_disabled = 0;
@@ -3952,9 +3834,9 @@ static void process_db_full(struct work_struct *work)
3952 drain_db_fifo(adap, dbfifo_drain_delay); 3834 drain_db_fifo(adap, dbfifo_drain_delay);
3953 enable_dbs(adap); 3835 enable_dbs(adap);
3954 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); 3836 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3955 t4_set_reg_field(adap, SGE_INT_ENABLE3, 3837 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
3956 DBFIFO_HP_INT | DBFIFO_LP_INT, 3838 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
3957 DBFIFO_HP_INT | DBFIFO_LP_INT); 3839 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
3958} 3840}
3959 3841
3960static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) 3842static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -3968,14 +3850,20 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3968 goto out; 3850 goto out;
3969 if (q->db_pidx != hw_pidx) { 3851 if (q->db_pidx != hw_pidx) {
3970 u16 delta; 3852 u16 delta;
3853 u32 val;
3971 3854
3972 if (q->db_pidx >= hw_pidx) 3855 if (q->db_pidx >= hw_pidx)
3973 delta = q->db_pidx - hw_pidx; 3856 delta = q->db_pidx - hw_pidx;
3974 else 3857 else
3975 delta = q->size - hw_pidx + q->db_pidx; 3858 delta = q->size - hw_pidx + q->db_pidx;
3859
3860 if (is_t4(adap->params.chip))
3861 val = PIDX_V(delta);
3862 else
3863 val = PIDX_T5_V(delta);
3976 wmb(); 3864 wmb();
3977 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 3865 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
3978 QID(q->cntxt_id) | PIDX(delta)); 3866 QID_V(q->cntxt_id) | val);
3979 } 3867 }
3980out: 3868out:
3981 q->db_disabled = 0; 3869 q->db_disabled = 0;
@@ -4024,14 +3912,14 @@ static void process_db_drop(struct work_struct *work)
4024 dev_err(adap->pdev_dev, "doorbell drop recovery: " 3912 dev_err(adap->pdev_dev, "doorbell drop recovery: "
4025 "qid=%d, pidx_inc=%d\n", qid, pidx_inc); 3913 "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
4026 else 3914 else
4027 writel(PIDX_T5(pidx_inc) | QID(bar2_qid), 3915 writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
4028 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); 3916 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
4029 3917
4030 /* Re-enable BAR2 WC */ 3918 /* Re-enable BAR2 WC */
4031 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); 3919 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4032 } 3920 }
4033 3921
4034 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0); 3922 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
4035} 3923}
4036 3924
4037void t4_db_full(struct adapter *adap) 3925void t4_db_full(struct adapter *adap)
@@ -4039,8 +3927,8 @@ void t4_db_full(struct adapter *adap)
4039 if (is_t4(adap->params.chip)) { 3927 if (is_t4(adap->params.chip)) {
4040 disable_dbs(adap); 3928 disable_dbs(adap);
4041 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); 3929 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4042 t4_set_reg_field(adap, SGE_INT_ENABLE3, 3930 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
4043 DBFIFO_HP_INT | DBFIFO_LP_INT, 0); 3931 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
4044 queue_work(adap->workq, &adap->db_full_task); 3932 queue_work(adap->workq, &adap->db_full_task);
4045 } 3933 }
4046} 3934}
@@ -4081,7 +3969,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
4081 lli.nports = adap->params.nports; 3969 lli.nports = adap->params.nports;
4082 lli.wr_cred = adap->params.ofldq_wr_cred; 3970 lli.wr_cred = adap->params.ofldq_wr_cred;
4083 lli.adapter_type = adap->params.chip; 3971 lli.adapter_type = adap->params.chip;
4084 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); 3972 lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
4085 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk; 3973 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
4086 lli.udb_density = 1 << adap->params.sge.eq_qpp; 3974 lli.udb_density = 1 << adap->params.sge.eq_qpp;
4087 lli.ucq_density = 1 << adap->params.sge.iq_qpp; 3975 lli.ucq_density = 1 << adap->params.sge.iq_qpp;
@@ -4089,8 +3977,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
4089 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ 3977 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
4090 for (i = 0; i < NCHAN; i++) 3978 for (i = 0; i < NCHAN; i++)
4091 lli.tx_modq[i] = i; 3979 lli.tx_modq[i] = i;
4092 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); 3980 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
4093 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); 3981 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
4094 lli.fw_vers = adap->params.fw_vers; 3982 lli.fw_vers = adap->params.fw_vers;
4095 lli.dbfifo_int_thresh = dbfifo_int_thresh; 3983 lli.dbfifo_int_thresh = dbfifo_int_thresh;
4096 lli.sge_ingpadboundary = adap->sge.fl_align; 3984 lli.sge_ingpadboundary = adap->sge.fl_align;
@@ -4220,148 +4108,61 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
4220} 4108}
4221EXPORT_SYMBOL(cxgb4_unregister_uld); 4109EXPORT_SYMBOL(cxgb4_unregister_uld);
4222 4110
4223/* Check if netdev on which event is occured belongs to us or not. Return
4224 * success (true) if it belongs otherwise failure (false).
4225 * Called with rcu_read_lock() held.
4226 */
4227#if IS_ENABLED(CONFIG_IPV6) 4111#if IS_ENABLED(CONFIG_IPV6)
4228static bool cxgb4_netdev(const struct net_device *netdev) 4112static int cxgb4_inet6addr_handler(struct notifier_block *this,
4113 unsigned long event, void *data)
4229{ 4114{
4115 struct inet6_ifaddr *ifa = data;
4116 struct net_device *event_dev = ifa->idev->dev;
4117 const struct device *parent = NULL;
4118#if IS_ENABLED(CONFIG_BONDING)
4230 struct adapter *adap; 4119 struct adapter *adap;
4231 int i; 4120#endif
4232 4121 if (event_dev->priv_flags & IFF_802_1Q_VLAN)
4233 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node) 4122 event_dev = vlan_dev_real_dev(event_dev);
4234 for (i = 0; i < MAX_NPORTS; i++) 4123#if IS_ENABLED(CONFIG_BONDING)
4235 if (adap->port[i] == netdev) 4124 if (event_dev->flags & IFF_MASTER) {
4236 return true; 4125 list_for_each_entry(adap, &adapter_list, list_node) {
4237 return false; 4126 switch (event) {
4238} 4127 case NETDEV_UP:
4128 cxgb4_clip_get(adap->port[0],
4129 (const u32 *)ifa, 1);
4130 break;
4131 case NETDEV_DOWN:
4132 cxgb4_clip_release(adap->port[0],
4133 (const u32 *)ifa, 1);
4134 break;
4135 default:
4136 break;
4137 }
4138 }
4139 return NOTIFY_OK;
4140 }
4141#endif
4239 4142
4240static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa, 4143 if (event_dev)
4241 unsigned long event) 4144 parent = event_dev->dev.parent;
4242{
4243 int ret = NOTIFY_DONE;
4244 4145
4245 rcu_read_lock(); 4146 if (parent && parent->driver == &cxgb4_driver.driver) {
4246 if (cxgb4_netdev(event_dev)) {
4247 switch (event) { 4147 switch (event) {
4248 case NETDEV_UP: 4148 case NETDEV_UP:
4249 ret = cxgb4_clip_get(event_dev, &ifa->addr); 4149 cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
4250 if (ret < 0) {
4251 rcu_read_unlock();
4252 return ret;
4253 }
4254 ret = NOTIFY_OK;
4255 break; 4150 break;
4256 case NETDEV_DOWN: 4151 case NETDEV_DOWN:
4257 cxgb4_clip_release(event_dev, &ifa->addr); 4152 cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
4258 ret = NOTIFY_OK;
4259 break; 4153 break;
4260 default: 4154 default:
4261 break; 4155 break;
4262 } 4156 }
4263 } 4157 }
4264 rcu_read_unlock(); 4158 return NOTIFY_OK;
4265 return ret;
4266}
4267
4268static int cxgb4_inet6addr_handler(struct notifier_block *this,
4269 unsigned long event, void *data)
4270{
4271 struct inet6_ifaddr *ifa = data;
4272 struct net_device *event_dev;
4273 int ret = NOTIFY_DONE;
4274 struct bonding *bond = netdev_priv(ifa->idev->dev);
4275 struct list_head *iter;
4276 struct slave *slave;
4277 struct pci_dev *first_pdev = NULL;
4278
4279 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4280 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4281 ret = clip_add(event_dev, ifa, event);
4282 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4283 /* It is possible that two different adapters are bonded in one
4284 * bond. We need to find such different adapters and add clip
4285 * in all of them only once.
4286 */
4287 bond_for_each_slave(bond, slave, iter) {
4288 if (!first_pdev) {
4289 ret = clip_add(slave->dev, ifa, event);
4290 /* If clip_add is success then only initialize
4291 * first_pdev since it means it is our device
4292 */
4293 if (ret == NOTIFY_OK)
4294 first_pdev = to_pci_dev(
4295 slave->dev->dev.parent);
4296 } else if (first_pdev !=
4297 to_pci_dev(slave->dev->dev.parent))
4298 ret = clip_add(slave->dev, ifa, event);
4299 }
4300 } else
4301 ret = clip_add(ifa->idev->dev, ifa, event);
4302
4303 return ret;
4304} 4159}
4305 4160
4161static bool inet6addr_registered;
4306static struct notifier_block cxgb4_inet6addr_notifier = { 4162static struct notifier_block cxgb4_inet6addr_notifier = {
4307 .notifier_call = cxgb4_inet6addr_handler 4163 .notifier_call = cxgb4_inet6addr_handler
4308}; 4164};
4309 4165
4310/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4311 * a physical device.
4312 * The physical device reference is needed to send the actul CLIP command.
4313 */
4314static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4315{
4316 struct inet6_dev *idev = NULL;
4317 struct inet6_ifaddr *ifa;
4318 int ret = 0;
4319
4320 idev = __in6_dev_get(root_dev);
4321 if (!idev)
4322 return ret;
4323
4324 read_lock_bh(&idev->lock);
4325 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4326 ret = cxgb4_clip_get(dev, &ifa->addr);
4327 if (ret < 0)
4328 break;
4329 }
4330 read_unlock_bh(&idev->lock);
4331
4332 return ret;
4333}
4334
4335static int update_root_dev_clip(struct net_device *dev)
4336{
4337 struct net_device *root_dev = NULL;
4338 int i, ret = 0;
4339
4340 /* First populate the real net device's IPv6 addresses */
4341 ret = update_dev_clip(dev, dev);
4342 if (ret)
4343 return ret;
4344
4345 /* Parse all bond and vlan devices layered on top of the physical dev */
4346 root_dev = netdev_master_upper_dev_get_rcu(dev);
4347 if (root_dev) {
4348 ret = update_dev_clip(root_dev, dev);
4349 if (ret)
4350 return ret;
4351 }
4352
4353 for (i = 0; i < VLAN_N_VID; i++) {
4354 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4355 if (!root_dev)
4356 continue;
4357
4358 ret = update_dev_clip(root_dev, dev);
4359 if (ret)
4360 break;
4361 }
4362 return ret;
4363}
4364
4365static void update_clip(const struct adapter *adap) 4166static void update_clip(const struct adapter *adap)
4366{ 4167{
4367 int i; 4168 int i;
@@ -4375,7 +4176,7 @@ static void update_clip(const struct adapter *adap)
4375 ret = 0; 4176 ret = 0;
4376 4177
4377 if (dev) 4178 if (dev)
4378 ret = update_root_dev_clip(dev); 4179 ret = cxgb4_update_root_dev_clip(dev);
4379 4180
4380 if (ret < 0) 4181 if (ret < 0)
4381 break; 4182 break;
@@ -4567,13 +4368,13 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4567 f->fs.val.lip[i] = val[i]; 4368 f->fs.val.lip[i] = val[i];
4568 f->fs.mask.lip[i] = ~0; 4369 f->fs.mask.lip[i] = ~0;
4569 } 4370 }
4570 if (adap->params.tp.vlan_pri_map & F_PORT) { 4371 if (adap->params.tp.vlan_pri_map & PORT_F) {
4571 f->fs.val.iport = port; 4372 f->fs.val.iport = port;
4572 f->fs.mask.iport = mask; 4373 f->fs.mask.iport = mask;
4573 } 4374 }
4574 } 4375 }
4575 4376
4576 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) { 4377 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
4577 f->fs.val.proto = IPPROTO_TCP; 4378 f->fs.val.proto = IPPROTO_TCP;
4578 f->fs.mask.proto = ~0; 4379 f->fs.mask.proto = ~0;
4579 } 4380 }
@@ -4779,11 +4580,15 @@ static const struct net_device_ops cxgb4_netdev_ops = {
4779#ifdef CONFIG_NET_POLL_CONTROLLER 4580#ifdef CONFIG_NET_POLL_CONTROLLER
4780 .ndo_poll_controller = cxgb_netpoll, 4581 .ndo_poll_controller = cxgb_netpoll,
4781#endif 4582#endif
4583#ifdef CONFIG_NET_RX_BUSY_POLL
4584 .ndo_busy_poll = cxgb_busy_poll,
4585#endif
4586
4782}; 4587};
4783 4588
4784void t4_fatal_err(struct adapter *adap) 4589void t4_fatal_err(struct adapter *adap)
4785{ 4590{
4786 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0); 4591 t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
4787 t4_intr_disable(adap); 4592 t4_intr_disable(adap);
4788 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); 4593 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4789} 4594}
@@ -4858,16 +4663,16 @@ static void setup_memwin(struct adapter *adap)
4858 mem_win2_base = MEMWIN2_BASE_T5; 4663 mem_win2_base = MEMWIN2_BASE_T5;
4859 mem_win2_aperture = MEMWIN2_APERTURE_T5; 4664 mem_win2_aperture = MEMWIN2_APERTURE_T5;
4860 } 4665 }
4861 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), 4666 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
4862 mem_win0_base | BIR(0) | 4667 mem_win0_base | BIR_V(0) |
4863 WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); 4668 WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
4864 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), 4669 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
4865 mem_win1_base | BIR(0) | 4670 mem_win1_base | BIR_V(0) |
4866 WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); 4671 WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
4867 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), 4672 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
4868 mem_win2_base | BIR(0) | 4673 mem_win2_base | BIR_V(0) |
4869 WINDOW(ilog2(mem_win2_aperture) - 10)); 4674 WINDOW_V(ilog2(mem_win2_aperture) - 10));
4870 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2)); 4675 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
4871} 4676}
4872 4677
4873static void setup_memwin_rdma(struct adapter *adap) 4678static void setup_memwin_rdma(struct adapter *adap)
@@ -4881,13 +4686,13 @@ static void setup_memwin_rdma(struct adapter *adap)
4881 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); 4686 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4882 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; 4687 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4883 t4_write_reg(adap, 4688 t4_write_reg(adap,
4884 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3), 4689 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
4885 start | BIR(1) | WINDOW(ilog2(sz_kb))); 4690 start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
4886 t4_write_reg(adap, 4691 t4_write_reg(adap,
4887 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3), 4692 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
4888 adap->vres.ocq.start); 4693 adap->vres.ocq.start);
4889 t4_read_reg(adap, 4694 t4_read_reg(adap,
4890 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3)); 4695 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
4891 } 4696 }
4892} 4697}
4893 4698
@@ -4936,38 +4741,38 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4936 t4_sge_init(adap); 4741 t4_sge_init(adap);
4937 4742
4938 /* tweak some settings */ 4743 /* tweak some settings */
4939 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849); 4744 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
4940 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12)); 4745 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
4941 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); 4746 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
4942 v = t4_read_reg(adap, TP_PIO_DATA); 4747 v = t4_read_reg(adap, TP_PIO_DATA_A);
4943 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); 4748 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
4944 4749
4945 /* first 4 Tx modulation queues point to consecutive Tx channels */ 4750 /* first 4 Tx modulation queues point to consecutive Tx channels */
4946 adap->params.tp.tx_modq_map = 0xE4; 4751 adap->params.tp.tx_modq_map = 0xE4;
4947 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP, 4752 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
4948 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map)); 4753 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
4949 4754
4950 /* associate each Tx modulation queue with consecutive Tx channels */ 4755 /* associate each Tx modulation queue with consecutive Tx channels */
4951 v = 0x84218421; 4756 v = 0x84218421;
4952 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, 4757 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4953 &v, 1, A_TP_TX_SCHED_HDR); 4758 &v, 1, TP_TX_SCHED_HDR_A);
4954 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, 4759 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4955 &v, 1, A_TP_TX_SCHED_FIFO); 4760 &v, 1, TP_TX_SCHED_FIFO_A);
4956 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, 4761 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4957 &v, 1, A_TP_TX_SCHED_PCMD); 4762 &v, 1, TP_TX_SCHED_PCMD_A);
4958 4763
4959#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */ 4764#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4960 if (is_offload(adap)) { 4765 if (is_offload(adap)) {
4961 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 4766 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
4962 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 4767 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4963 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 4768 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4964 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 4769 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4965 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT)); 4770 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4966 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT, 4771 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
4967 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 4772 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4968 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 4773 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4969 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 4774 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4970 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT)); 4775 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4971 } 4776 }
4972 4777
4973 /* get basic stuff going */ 4778 /* get basic stuff going */
@@ -5013,16 +4818,16 @@ static int adap_init0_tweaks(struct adapter *adapter)
5013 rx_dma_offset); 4818 rx_dma_offset);
5014 rx_dma_offset = 2; 4819 rx_dma_offset = 2;
5015 } 4820 }
5016 t4_set_reg_field(adapter, SGE_CONTROL, 4821 t4_set_reg_field(adapter, SGE_CONTROL_A,
5017 PKTSHIFT_MASK, 4822 PKTSHIFT_V(PKTSHIFT_M),
5018 PKTSHIFT(rx_dma_offset)); 4823 PKTSHIFT_V(rx_dma_offset));
5019 4824
5020 /* 4825 /*
5021 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux 4826 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
5022 * adds the pseudo header itself. 4827 * adds the pseudo header itself.
5023 */ 4828 */
5024 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG, 4829 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
5025 CSUM_HAS_PSEUDO_HDR, 0); 4830 CSUM_HAS_PSEUDO_HDR_F, 0);
5026 4831
5027 return 0; 4832 return 0;
5028} 4833}
@@ -5046,7 +4851,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
5046 */ 4851 */
5047 if (reset) { 4852 if (reset) {
5048 ret = t4_fw_reset(adapter, adapter->mbox, 4853 ret = t4_fw_reset(adapter, adapter->mbox,
5049 PIORSTMODE | PIORST); 4854 PIORSTMODE_F | PIORST_F);
5050 if (ret < 0) 4855 if (ret < 0)
5051 goto bye; 4856 goto bye;
5052 } 4857 }
@@ -5212,12 +5017,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
5212 if (ret < 0) 5017 if (ret < 0)
5213 goto bye; 5018 goto bye;
5214 5019
5215 /* 5020 /* Emit Firmware Configuration File information and return
5216 * Return successfully and note that we're operating with parameters 5021 * successfully.
5217 * not supplied by the driver, rather than from hard-wired
5218 * initialization constants burried in the driver.
5219 */ 5022 */
5220 adapter->flags |= USING_SOFT_PARAMS;
5221 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ 5023 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
5222 "Configuration File \"%s\", version %#x, computed checksum %#x\n", 5024 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5223 config_name, finiver, cfcsum); 5025 config_name, finiver, cfcsum);
@@ -5235,249 +5037,6 @@ bye:
5235 return ret; 5037 return ret;
5236} 5038}
5237 5039
5238/*
5239 * Attempt to initialize the adapter via hard-coded, driver supplied
5240 * parameters ...
5241 */
5242static int adap_init0_no_config(struct adapter *adapter, int reset)
5243{
5244 struct sge *s = &adapter->sge;
5245 struct fw_caps_config_cmd caps_cmd;
5246 u32 v;
5247 int i, ret;
5248
5249 /*
5250 * Reset device if necessary
5251 */
5252 if (reset) {
5253 ret = t4_fw_reset(adapter, adapter->mbox,
5254 PIORSTMODE | PIORST);
5255 if (ret < 0)
5256 goto bye;
5257 }
5258
5259 /*
5260 * Get device capabilities and select which we'll be using.
5261 */
5262 memset(&caps_cmd, 0, sizeof(caps_cmd));
5263 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5264 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5265 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5266 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5267 &caps_cmd);
5268 if (ret < 0)
5269 goto bye;
5270
5271 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5272 if (!vf_acls)
5273 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5274 else
5275 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5276 } else if (vf_acls) {
5277 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
5278 goto bye;
5279 }
5280 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5281 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
5282 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5283 NULL);
5284 if (ret < 0)
5285 goto bye;
5286
5287 /*
5288 * Tweak configuration based on system architecture, module
5289 * parameters, etc.
5290 */
5291 ret = adap_init0_tweaks(adapter);
5292 if (ret < 0)
5293 goto bye;
5294
5295 /*
5296 * Select RSS Global Mode we want to use. We use "Basic Virtual"
5297 * mode which maps each Virtual Interface to its own section of
5298 * the RSS Table and we turn on all map and hash enables ...
5299 */
5300 adapter->flags |= RSS_TNLALLLOOKUP;
5301 ret = t4_config_glbl_rss(adapter, adapter->mbox,
5302 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5303 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
5304 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F |
5305 ((adapter->flags & RSS_TNLALLLOOKUP) ?
5306 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F : 0));
5307 if (ret < 0)
5308 goto bye;
5309
5310 /*
5311 * Set up our own fundamental resource provisioning ...
5312 */
5313 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
5314 PFRES_NEQ, PFRES_NETHCTRL,
5315 PFRES_NIQFLINT, PFRES_NIQ,
5316 PFRES_TC, PFRES_NVI,
5317 FW_PFVF_CMD_CMASK_M,
5318 pfvfres_pmask(adapter, adapter->fn, 0),
5319 PFRES_NEXACTF,
5320 PFRES_R_CAPS, PFRES_WX_CAPS);
5321 if (ret < 0)
5322 goto bye;
5323
5324 /*
5325 * Perform low level SGE initialization. We need to do this before we
5326 * send the firmware the INITIALIZE command because that will cause
5327 * any other PF Drivers which are waiting for the Master
5328 * Initialization to proceed forward.
5329 */
5330 for (i = 0; i < SGE_NTIMERS - 1; i++)
5331 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
5332 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
5333 s->counter_val[0] = 1;
5334 for (i = 1; i < SGE_NCOUNTERS; i++)
5335 s->counter_val[i] = min(intr_cnt[i - 1],
5336 THRESHOLD_0_GET(THRESHOLD_0_MASK));
5337 t4_sge_init(adapter);
5338
5339#ifdef CONFIG_PCI_IOV
5340 /*
5341 * Provision resource limits for Virtual Functions. We currently
5342 * grant them all the same static resource limits except for the Port
5343 * Access Rights Mask which we're assigning based on the PF. All of
5344 * the static provisioning stuff for both the PF and VF really needs
5345 * to be managed in a persistent manner for each device which the
5346 * firmware controls.
5347 */
5348 {
5349 int pf, vf;
5350
5351 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
5352 if (num_vf[pf] <= 0)
5353 continue;
5354
5355 /* VF numbering starts at 1! */
5356 for (vf = 1; vf <= num_vf[pf]; vf++) {
5357 ret = t4_cfg_pfvf(adapter, adapter->mbox,
5358 pf, vf,
5359 VFRES_NEQ, VFRES_NETHCTRL,
5360 VFRES_NIQFLINT, VFRES_NIQ,
5361 VFRES_TC, VFRES_NVI,
5362 FW_PFVF_CMD_CMASK_M,
5363 pfvfres_pmask(
5364 adapter, pf, vf),
5365 VFRES_NEXACTF,
5366 VFRES_R_CAPS, VFRES_WX_CAPS);
5367 if (ret < 0)
5368 dev_warn(adapter->pdev_dev,
5369 "failed to "\
5370 "provision pf/vf=%d/%d; "
5371 "err=%d\n", pf, vf, ret);
5372 }
5373 }
5374 }
5375#endif
5376
5377 /*
5378 * Set up the default filter mode. Later we'll want to implement this
5379 * via a firmware command, etc. ... This needs to be done before the
5380 * firmare initialization command ... If the selected set of fields
5381 * isn't equal to the default value, we'll need to make sure that the
5382 * field selections will fit in the 36-bit budget.
5383 */
5384 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
5385 int j, bits = 0;
5386
5387 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5388 switch (tp_vlan_pri_map & (1 << j)) {
5389 case 0:
5390 /* compressed filter field not enabled */
5391 break;
5392 case FCOE_MASK:
5393 bits += 1;
5394 break;
5395 case PORT_MASK:
5396 bits += 3;
5397 break;
5398 case VNIC_ID_MASK:
5399 bits += 17;
5400 break;
5401 case VLAN_MASK:
5402 bits += 17;
5403 break;
5404 case TOS_MASK:
5405 bits += 8;
5406 break;
5407 case PROTOCOL_MASK:
5408 bits += 8;
5409 break;
5410 case ETHERTYPE_MASK:
5411 bits += 16;
5412 break;
5413 case MACMATCH_MASK:
5414 bits += 9;
5415 break;
5416 case MPSHITTYPE_MASK:
5417 bits += 3;
5418 break;
5419 case FRAGMENTATION_MASK:
5420 bits += 1;
5421 break;
5422 }
5423
5424 if (bits > 36) {
5425 dev_err(adapter->pdev_dev,
5426 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5427 " using %#x\n", tp_vlan_pri_map, bits,
5428 TP_VLAN_PRI_MAP_DEFAULT);
5429 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5430 }
5431 }
5432 v = tp_vlan_pri_map;
5433 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5434 &v, 1, TP_VLAN_PRI_MAP);
5435
5436 /*
5437 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order
5438 * to support any of the compressed filter fields above. Newer
5439 * versions of the firmware do this automatically but it doesn't hurt
5440 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5441 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5442 * since the firmware automatically turns this on and off when we have
5443 * a non-zero number of filters active (since it does have a
5444 * performance impact).
5445 */
5446 if (tp_vlan_pri_map)
5447 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5448 FIVETUPLELOOKUP_MASK,
5449 FIVETUPLELOOKUP_MASK);
5450
5451 /*
5452 * Tweak some settings.
5453 */
5454 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5455 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5456 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5457 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5458
5459 /*
5460 * Get basic stuff going by issuing the Firmware Initialize command.
5461 * Note that this _must_ be after all PFVF commands ...
5462 */
5463 ret = t4_fw_initialize(adapter, adapter->mbox);
5464 if (ret < 0)
5465 goto bye;
5466
5467 /*
5468 * Return successfully!
5469 */
5470 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5471 "driver parameters\n");
5472 return 0;
5473
5474 /*
5475 * Something bad happened. Return the error ...
5476 */
5477bye:
5478 return ret;
5479}
5480
5481static struct fw_info fw_info_array[] = { 5040static struct fw_info fw_info_array[] = {
5482 { 5041 {
5483 .chip = CHELSIO_T4, 5042 .chip = CHELSIO_T4,
@@ -5529,6 +5088,8 @@ static int adap_init0(struct adapter *adap)
5529 enum dev_state state; 5088 enum dev_state state;
5530 u32 params[7], val[7]; 5089 u32 params[7], val[7];
5531 struct fw_caps_config_cmd caps_cmd; 5090 struct fw_caps_config_cmd caps_cmd;
5091 struct fw_devlog_cmd devlog_cmd;
5092 u32 devlog_meminfo;
5532 int reset = 1; 5093 int reset = 1;
5533 5094
5534 /* Contact FW, advertising Master capability */ 5095 /* Contact FW, advertising Master capability */
@@ -5590,8 +5151,7 @@ static int adap_init0(struct adapter *adap)
5590 state, &reset); 5151 state, &reset);
5591 5152
5592 /* Cleaning up */ 5153 /* Cleaning up */
5593 if (fw != NULL) 5154 release_firmware(fw);
5594 release_firmware(fw);
5595 t4_free_mem(card_fw); 5155 t4_free_mem(card_fw);
5596 5156
5597 if (ret < 0) 5157 if (ret < 0)
@@ -5609,6 +5169,30 @@ static int adap_init0(struct adapter *adap)
5609 if (ret < 0) 5169 if (ret < 0)
5610 goto bye; 5170 goto bye;
5611 5171
5172 /* Read firmware device log parameters. We really need to find a way
5173 * to get these parameters initialized with some default values (which
5174 * are likely to be correct) for the case where we either don't
5175 * attache to the firmware or it's crashed when we probe the adapter.
5176 * That way we'll still be able to perform early firmware startup
5177 * debugging ... If the request to get the Firmware's Device Log
5178 * parameters fails, we'll live so we don't make that a fatal error.
5179 */
5180 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
5181 devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
5182 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5183 devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
5184 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
5185 &devlog_cmd);
5186 if (ret == 0) {
5187 devlog_meminfo =
5188 ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
5189 adap->params.devlog.memtype =
5190 FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
5191 adap->params.devlog.start =
5192 FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
5193 adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
5194 }
5195
5612 /* 5196 /*
5613 * Find out what ports are available to us. Note that we need to do 5197 * Find out what ports are available to us. Note that we need to do
5614 * this before calling adap_init0_no_config() since it needs nports 5198 * this before calling adap_init0_no_config() since it needs nports
@@ -5624,88 +5208,58 @@ static int adap_init0(struct adapter *adap)
5624 adap->params.nports = hweight32(port_vec); 5208 adap->params.nports = hweight32(port_vec);
5625 adap->params.portvec = port_vec; 5209 adap->params.portvec = port_vec;
5626 5210
5627 /* 5211 /* If the firmware is initialized already, emit a simply note to that
5628 * If the firmware is initialized already (and we're not forcing a 5212 * effect. Otherwise, it's time to try initializing the adapter.
5629 * master initialization), note that we're living with existing
5630 * adapter parameters. Otherwise, it's time to try initializing the
5631 * adapter ...
5632 */ 5213 */
5633 if (state == DEV_STATE_INIT) { 5214 if (state == DEV_STATE_INIT) {
5634 dev_info(adap->pdev_dev, "Coming up as %s: "\ 5215 dev_info(adap->pdev_dev, "Coming up as %s: "\
5635 "Adapter already initialized\n", 5216 "Adapter already initialized\n",
5636 adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); 5217 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5637 adap->flags |= USING_SOFT_PARAMS;
5638 } else { 5218 } else {
5639 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ 5219 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5640 "Initializing adapter\n"); 5220 "Initializing adapter\n");
5641 /* 5221
5642 * If the firmware doesn't support Configuration 5222 /* Find out whether we're dealing with a version of the
5643 * Files warn user and exit, 5223 * firmware which has configuration file support.
5644 */ 5224 */
5645 if (ret < 0) 5225 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5646 dev_warn(adap->pdev_dev, "Firmware doesn't support " 5226 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
5647 "configuration file.\n"); 5227 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5648 if (force_old_init) 5228 params, val);
5649 ret = adap_init0_no_config(adap, reset);
5650 else {
5651 /*
5652 * Find out whether we're dealing with a version of
5653 * the firmware which has configuration file support.
5654 */
5655 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5656 FW_PARAMS_PARAM_X_V(
5657 FW_PARAMS_PARAM_DEV_CF));
5658 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5659 params, val);
5660
5661 /*
5662 * If the firmware doesn't support Configuration
5663 * Files, use the old Driver-based, hard-wired
5664 * initialization. Otherwise, try using the
5665 * Configuration File support and fall back to the
5666 * Driver-based initialization if there's no
5667 * Configuration File found.
5668 */
5669 if (ret < 0)
5670 ret = adap_init0_no_config(adap, reset);
5671 else {
5672 /*
5673 * The firmware provides us with a memory
5674 * buffer where we can load a Configuration
5675 * File from the host if we want to override
5676 * the Configuration File in flash.
5677 */
5678 5229
5679 ret = adap_init0_config(adap, reset); 5230 /* If the firmware doesn't support Configuration Files,
5680 if (ret == -ENOENT) { 5231 * return an error.
5681 dev_info(adap->pdev_dev, 5232 */
5682 "No Configuration File present " 5233 if (ret < 0) {
5683 "on adapter. Using hard-wired " 5234 dev_err(adap->pdev_dev, "firmware doesn't support "
5684 "configuration parameters.\n"); 5235 "Firmware Configuration Files\n");
5685 ret = adap_init0_no_config(adap, reset); 5236 goto bye;
5686 } 5237 }
5687 } 5238
5239 /* The firmware provides us with a memory buffer where we can
5240 * load a Configuration File from the host if we want to
5241 * override the Configuration File in flash.
5242 */
5243 ret = adap_init0_config(adap, reset);
5244 if (ret == -ENOENT) {
5245 dev_err(adap->pdev_dev, "no Configuration File "
5246 "present on adapter.\n");
5247 goto bye;
5688 } 5248 }
5689 if (ret < 0) { 5249 if (ret < 0) {
5690 dev_err(adap->pdev_dev, 5250 dev_err(adap->pdev_dev, "could not initialize "
5691 "could not initialize adapter, error %d\n", 5251 "adapter, error %d\n", -ret);
5692 -ret);
5693 goto bye; 5252 goto bye;
5694 } 5253 }
5695 } 5254 }
5696 5255
5697 /* 5256 /* Give the SGE code a chance to pull in anything that it needs ...
5698 * If we're living with non-hard-coded parameters (either from a 5257 * Note that this must be called after we retrieve our VPD parameters
5699 * Firmware Configuration File or values programmed by a different PF 5258 * in order to know how to convert core ticks to seconds, etc.
5700 * Driver), give the SGE code a chance to pull in anything that it
5701 * needs ... Note that this must be called after we retrieve our VPD
5702 * parameters in order to know how to convert core ticks to seconds.
5703 */ 5259 */
5704 if (adap->flags & USING_SOFT_PARAMS) { 5260 ret = t4_sge_init(adap);
5705 ret = t4_sge_init(adap); 5261 if (ret < 0)
5706 if (ret < 0) 5262 goto bye;
5707 goto bye;
5708 }
5709 5263
5710 if (is_bypass_device(adap->pdev->device)) 5264 if (is_bypass_device(adap->pdev->device))
5711 adap->params.bypass = 1; 5265 adap->params.bypass = 1;
@@ -5739,6 +5293,14 @@ static int adap_init0(struct adapter *adap)
5739 adap->tids.nftids = val[4] - val[3] + 1; 5293 adap->tids.nftids = val[4] - val[3] + 1;
5740 adap->sge.ingr_start = val[5]; 5294 adap->sge.ingr_start = val[5];
5741 5295
5296 params[0] = FW_PARAM_PFVF(CLIP_START);
5297 params[1] = FW_PARAM_PFVF(CLIP_END);
5298 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5299 if (ret < 0)
5300 goto bye;
5301 adap->clipt_start = val[0];
5302 adap->clipt_end = val[1];
5303
5742 /* query params related to active filter region */ 5304 /* query params related to active filter region */
5743 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); 5305 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5744 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END); 5306 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
@@ -6401,7 +5963,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6401 goto out_unmap_bar0; 5963 goto out_unmap_bar0;
6402 5964
6403 /* We control everything through one PF */ 5965 /* We control everything through one PF */
6404 func = SOURCEPF_GET(readl(regs + PL_WHOAMI)); 5966 func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
6405 if (func != ent->driver_data) { 5967 if (func != ent->driver_data) {
6406 iounmap(regs); 5968 iounmap(regs);
6407 pci_disable_device(pdev); 5969 pci_disable_device(pdev);
@@ -6467,9 +6029,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6467 6029
6468 6030
6469 if (!is_t4(adapter->params.chip)) { 6031 if (!is_t4(adapter->params.chip)) {
6470 s_qpp = QUEUESPERPAGEPF1 * adapter->fn; 6032 s_qpp = (QUEUESPERPAGEPF0_S +
6471 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, 6033 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
6472 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); 6034 adapter->fn);
6035 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
6036 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
6473 num_seg = PAGE_SIZE / SEGMENT_SIZE; 6037 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6474 6038
6475 /* Each segment size is 128B. Write coalescing is enabled only 6039 /* Each segment size is 128B. Write coalescing is enabled only
@@ -6557,6 +6121,18 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6557 adapter->params.offload = 0; 6121 adapter->params.offload = 0;
6558 } 6122 }
6559 6123
6124#if IS_ENABLED(CONFIG_IPV6)
6125 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
6126 adapter->clipt_end);
6127 if (!adapter->clipt) {
6128 /* We tolerate a lack of clip_table, giving up
6129 * some functionality
6130 */
6131 dev_warn(&pdev->dev,
6132 "could not allocate Clip table, continuing\n");
6133 adapter->params.offload = 0;
6134 }
6135#endif
6560 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { 6136 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6561 dev_warn(&pdev->dev, "could not allocate TID table, " 6137 dev_warn(&pdev->dev, "could not allocate TID table, "
6562 "continuing\n"); 6138 "continuing\n");
@@ -6682,6 +6258,9 @@ static void remove_one(struct pci_dev *pdev)
6682 cxgb_down(adapter); 6258 cxgb_down(adapter);
6683 6259
6684 free_some_resources(adapter); 6260 free_some_resources(adapter);
6261#if IS_ENABLED(CONFIG_IPV6)
6262 t4_cleanup_clip_tbl(adapter);
6263#endif
6685 iounmap(adapter->regs); 6264 iounmap(adapter->regs);
6686 if (!is_t4(adapter->params.chip)) 6265 if (!is_t4(adapter->params.chip))
6687 iounmap(adapter->bar2); 6266 iounmap(adapter->bar2);
@@ -6720,7 +6299,10 @@ static int __init cxgb4_init_module(void)
6720 debugfs_remove(cxgb4_debugfs_root); 6299 debugfs_remove(cxgb4_debugfs_root);
6721 6300
6722#if IS_ENABLED(CONFIG_IPV6) 6301#if IS_ENABLED(CONFIG_IPV6)
6723 register_inet6addr_notifier(&cxgb4_inet6addr_notifier); 6302 if (!inet6addr_registered) {
6303 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6304 inet6addr_registered = true;
6305 }
6724#endif 6306#endif
6725 6307
6726 return ret; 6308 return ret;
@@ -6729,7 +6311,10 @@ static int __init cxgb4_init_module(void)
6729static void __exit cxgb4_cleanup_module(void) 6311static void __exit cxgb4_cleanup_module(void)
6730{ 6312{
6731#if IS_ENABLED(CONFIG_IPV6) 6313#if IS_ENABLED(CONFIG_IPV6)
6732 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); 6314 if (inet6addr_registered) {
6315 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6316 inet6addr_registered = false;
6317 }
6733#endif 6318#endif
6734 pci_unregister_driver(&cxgb4_driver); 6319 pci_unregister_driver(&cxgb4_driver);
6735 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ 6320 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 152b4c4c7809..78ab4d406ce2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -173,9 +173,6 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
173 unsigned char port, unsigned char mask); 173 unsigned char port, unsigned char mask);
174int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, 174int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
175 unsigned int queue, bool ipv6); 175 unsigned int queue, bool ipv6);
176int cxgb4_clip_get(const struct net_device *dev, const struct in6_addr *lip);
177int cxgb4_clip_release(const struct net_device *dev,
178 const struct in6_addr *lip);
179 176
180static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) 177static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
181{ 178{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index a047baa9fd04..252efc29321f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -46,6 +46,7 @@
46#include "t4_msg.h" 46#include "t4_msg.h"
47#include "t4fw_api.h" 47#include "t4fw_api.h"
48#include "t4_regs.h" 48#include "t4_regs.h"
49#include "t4_values.h"
49 50
50#define VLAN_NONE 0xfff 51#define VLAN_NONE 0xfff
51 52
@@ -150,8 +151,8 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
150 151
151 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, 152 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
152 e->idx | (sync ? F_SYNC_WR : 0) | 153 e->idx | (sync ? F_SYNC_WR : 0) |
153 TID_QID(adap->sge.fw_evtq.abs_id))); 154 TID_QID_V(adap->sge.fw_evtq.abs_id)));
154 req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync)); 155 req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
155 req->l2t_idx = htons(e->idx); 156 req->l2t_idx = htons(e->idx);
156 req->vlan = htons(e->vlan); 157 req->vlan = htons(e->vlan);
157 if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK)) 158 if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
@@ -425,7 +426,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
425 * in the Compressed Filter Tuple. 426 * in the Compressed Filter Tuple.
426 */ 427 */
427 if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE) 428 if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
428 ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift; 429 ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;
429 430
430 if (tp->port_shift >= 0) 431 if (tp->port_shift >= 0)
431 ntuple |= (u64)l2t->lport << tp->port_shift; 432 ntuple |= (u64)l2t->lport << tp->port_shift;
@@ -439,9 +440,9 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
439 u32 pf = FW_VIID_PFN_G(viid); 440 u32 pf = FW_VIID_PFN_G(viid);
440 u32 vld = FW_VIID_VIVLD_G(viid); 441 u32 vld = FW_VIID_VIVLD_G(viid);
441 442
442 ntuple |= (u64)(V_FT_VNID_ID_VF(vf) | 443 ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
443 V_FT_VNID_ID_PF(pf) | 444 FT_VNID_ID_PF_V(pf) |
444 V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift; 445 FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
445 } 446 }
446 447
447 return ntuple; 448 return ntuple;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ebf935a1e352..b4b9f6048fe7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -43,8 +43,12 @@
43#include <linux/export.h> 43#include <linux/export.h>
44#include <net/ipv6.h> 44#include <net/ipv6.h>
45#include <net/tcp.h> 45#include <net/tcp.h>
46#ifdef CONFIG_NET_RX_BUSY_POLL
47#include <net/busy_poll.h>
48#endif /* CONFIG_NET_RX_BUSY_POLL */
46#include "cxgb4.h" 49#include "cxgb4.h"
47#include "t4_regs.h" 50#include "t4_regs.h"
51#include "t4_values.h"
48#include "t4_msg.h" 52#include "t4_msg.h"
49#include "t4fw_api.h" 53#include "t4fw_api.h"
50 54
@@ -521,10 +525,12 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
521{ 525{
522 u32 val; 526 u32 val;
523 if (q->pend_cred >= 8) { 527 if (q->pend_cred >= 8) {
524 val = PIDX(q->pend_cred / 8); 528 if (is_t4(adap->params.chip))
525 if (!is_t4(adap->params.chip)) 529 val = PIDX_V(q->pend_cred / 8);
526 val |= DBTYPE(1); 530 else
527 val |= DBPRIO(1); 531 val = PIDX_T5_V(q->pend_cred / 8) |
532 DBTYPE_F;
533 val |= DBPRIO_F;
528 wmb(); 534 wmb();
529 535
530 /* If we don't have access to the new User Doorbell (T5+), use 536 /* If we don't have access to the new User Doorbell (T5+), use
@@ -532,10 +538,10 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
532 * mechanism. 538 * mechanism.
533 */ 539 */
534 if (unlikely(q->bar2_addr == NULL)) { 540 if (unlikely(q->bar2_addr == NULL)) {
535 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 541 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
536 val | QID(q->cntxt_id)); 542 val | QID_V(q->cntxt_id));
537 } else { 543 } else {
538 writel(val | QID(q->bar2_qid), 544 writel(val | QID_V(q->bar2_qid),
539 q->bar2_addr + SGE_UDB_KDOORBELL); 545 q->bar2_addr + SGE_UDB_KDOORBELL);
540 546
541 /* This Write memory Barrier will force the write to 547 /* This Write memory Barrier will force the write to
@@ -818,7 +824,8 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
818 sgl->addr0 = cpu_to_be64(addr[1]); 824 sgl->addr0 = cpu_to_be64(addr[1]);
819 } 825 }
820 826
821 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); 827 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
828 ULPTX_NSGE_V(nfrags));
822 if (likely(--nfrags == 0)) 829 if (likely(--nfrags == 0))
823 return; 830 return;
824 /* 831 /*
@@ -884,7 +891,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
884 * doorbell mechanism; otherwise use the new BAR2 mechanism. 891 * doorbell mechanism; otherwise use the new BAR2 mechanism.
885 */ 892 */
886 if (unlikely(q->bar2_addr == NULL)) { 893 if (unlikely(q->bar2_addr == NULL)) {
887 u32 val = PIDX(n); 894 u32 val = PIDX_V(n);
888 unsigned long flags; 895 unsigned long flags;
889 896
890 /* For T4 we need to participate in the Doorbell Recovery 897 /* For T4 we need to participate in the Doorbell Recovery
@@ -892,14 +899,14 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
892 */ 899 */
893 spin_lock_irqsave(&q->db_lock, flags); 900 spin_lock_irqsave(&q->db_lock, flags);
894 if (!q->db_disabled) 901 if (!q->db_disabled)
895 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 902 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
896 QID(q->cntxt_id) | val); 903 QID_V(q->cntxt_id) | val);
897 else 904 else
898 q->db_pidx_inc += n; 905 q->db_pidx_inc += n;
899 q->db_pidx = q->pidx; 906 q->db_pidx = q->pidx;
900 spin_unlock_irqrestore(&q->db_lock, flags); 907 spin_unlock_irqrestore(&q->db_lock, flags);
901 } else { 908 } else {
902 u32 val = PIDX_T5(n); 909 u32 val = PIDX_T5_V(n);
903 910
904 /* T4 and later chips share the same PIDX field offset within 911 /* T4 and later chips share the same PIDX field offset within
905 * the doorbell, but T5 and later shrank the field in order to 912 * the doorbell, but T5 and later shrank the field in order to
@@ -907,7 +914,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
907 * large in the first place (14 bits) so we just use the T5 914 * large in the first place (14 bits) so we just use the T5
908 * and later limits and warn if a Queue ID is too large. 915 * and later limits and warn if a Queue ID is too large.
909 */ 916 */
910 WARN_ON(val & DBPRIO(1)); 917 WARN_ON(val & DBPRIO_F);
911 918
912 /* If we're only writing a single TX Descriptor and we can use 919 /* If we're only writing a single TX Descriptor and we can use
913 * Inferred QID registers, we can use the Write Combining 920 * Inferred QID registers, we can use the Write Combining
@@ -923,7 +930,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
923 (q->bar2_addr + SGE_UDB_WCDOORBELL), 930 (q->bar2_addr + SGE_UDB_WCDOORBELL),
924 wr); 931 wr);
925 } else { 932 } else {
926 writel(val | QID(q->bar2_qid), 933 writel(val | QID_V(q->bar2_qid),
927 q->bar2_addr + SGE_UDB_KDOORBELL); 934 q->bar2_addr + SGE_UDB_KDOORBELL);
928 } 935 }
929 936
@@ -1150,9 +1157,9 @@ out_free: dev_kfree_skb_any(skb);
1150 cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; 1157 cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
1151 } 1158 }
1152 1159
1153 if (vlan_tx_tag_present(skb)) { 1160 if (skb_vlan_tag_present(skb)) {
1154 q->vlan_ins++; 1161 q->vlan_ins++;
1155 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); 1162 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
1156 } 1163 }
1157 1164
1158 cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | 1165 cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
@@ -1716,6 +1723,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1716 skb->truesize += skb->data_len; 1723 skb->truesize += skb->data_len;
1717 skb->ip_summed = CHECKSUM_UNNECESSARY; 1724 skb->ip_summed = CHECKSUM_UNNECESSARY;
1718 skb_record_rx_queue(skb, rxq->rspq.idx); 1725 skb_record_rx_queue(skb, rxq->rspq.idx);
1726 skb_mark_napi_id(skb, &rxq->rspq.napi);
1719 if (rxq->rspq.netdev->features & NETIF_F_RXHASH) 1727 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1720 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, 1728 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
1721 PKT_HASH_TYPE_L3); 1729 PKT_HASH_TYPE_L3);
@@ -1758,7 +1766,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1758 pkt = (const struct cpl_rx_pkt *)rsp; 1766 pkt = (const struct cpl_rx_pkt *)rsp;
1759 csum_ok = pkt->csum_calc && !pkt->err_vec && 1767 csum_ok = pkt->csum_calc && !pkt->err_vec &&
1760 (q->netdev->features & NETIF_F_RXCSUM); 1768 (q->netdev->features & NETIF_F_RXCSUM);
1761 if ((pkt->l2info & htonl(RXF_TCP)) && 1769 if ((pkt->l2info & htonl(RXF_TCP_F)) &&
1770 !(cxgb_poll_busy_polling(q)) &&
1762 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { 1771 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1763 do_gro(rxq, si, pkt); 1772 do_gro(rxq, si, pkt);
1764 return 0; 1773 return 0;
@@ -1780,11 +1789,11 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1780 1789
1781 rxq->stats.pkts++; 1790 rxq->stats.pkts++;
1782 1791
1783 if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { 1792 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
1784 if (!pkt->ip_frag) { 1793 if (!pkt->ip_frag) {
1785 skb->ip_summed = CHECKSUM_UNNECESSARY; 1794 skb->ip_summed = CHECKSUM_UNNECESSARY;
1786 rxq->stats.rx_cso++; 1795 rxq->stats.rx_cso++;
1787 } else if (pkt->l2info & htonl(RXF_IP)) { 1796 } else if (pkt->l2info & htonl(RXF_IP_F)) {
1788 __sum16 c = (__force __sum16)pkt->csum; 1797 __sum16 c = (__force __sum16)pkt->csum;
1789 skb->csum = csum_unfold(c); 1798 skb->csum = csum_unfold(c);
1790 skb->ip_summed = CHECKSUM_COMPLETE; 1799 skb->ip_summed = CHECKSUM_COMPLETE;
@@ -1797,6 +1806,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1797 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); 1806 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1798 rxq->stats.vlan_ex++; 1807 rxq->stats.vlan_ex++;
1799 } 1808 }
1809 skb_mark_napi_id(skb, &q->napi);
1800 netif_receive_skb(skb); 1810 netif_receive_skb(skb);
1801 return 0; 1811 return 0;
1802} 1812}
@@ -1959,6 +1969,38 @@ static int process_responses(struct sge_rspq *q, int budget)
1959 return budget - budget_left; 1969 return budget - budget_left;
1960} 1970}
1961 1971
1972#ifdef CONFIG_NET_RX_BUSY_POLL
1973int cxgb_busy_poll(struct napi_struct *napi)
1974{
1975 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1976 unsigned int params, work_done;
1977 u32 val;
1978
1979 if (!cxgb_poll_lock_poll(q))
1980 return LL_FLUSH_BUSY;
1981
1982 work_done = process_responses(q, 4);
1983 params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
1984 q->next_intr_params = params;
1985 val = CIDXINC_V(work_done) | SEINTARM_V(params);
1986
1987 /* If we don't have access to the new User GTS (T5+), use the old
1988 * doorbell mechanism; otherwise use the new BAR2 mechanism.
1989 */
1990 if (unlikely(!q->bar2_addr))
1991 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
1992 val | INGRESSQID_V((u32)q->cntxt_id));
1993 else {
1994 writel(val | INGRESSQID_V(q->bar2_qid),
1995 q->bar2_addr + SGE_UDB_GTS);
1996 wmb();
1997 }
1998
1999 cxgb_poll_unlock_poll(q);
2000 return work_done;
2001}
2002#endif /* CONFIG_NET_RX_BUSY_POLL */
2003
1962/** 2004/**
1963 * napi_rx_handler - the NAPI handler for Rx processing 2005 * napi_rx_handler - the NAPI handler for Rx processing
1964 * @napi: the napi instance 2006 * @napi: the napi instance
@@ -1974,9 +2016,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
1974{ 2016{
1975 unsigned int params; 2017 unsigned int params;
1976 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); 2018 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1977 int work_done = process_responses(q, budget); 2019 int work_done;
1978 u32 val; 2020 u32 val;
1979 2021
2022 if (!cxgb_poll_lock_napi(q))
2023 return budget;
2024
2025 work_done = process_responses(q, budget);
1980 if (likely(work_done < budget)) { 2026 if (likely(work_done < budget)) {
1981 int timer_index; 2027 int timer_index;
1982 2028
@@ -2001,19 +2047,20 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
2001 } else 2047 } else
2002 params = QINTR_TIMER_IDX(7); 2048 params = QINTR_TIMER_IDX(7);
2003 2049
2004 val = CIDXINC(work_done) | SEINTARM(params); 2050 val = CIDXINC_V(work_done) | SEINTARM_V(params);
2005 2051
2006 /* If we don't have access to the new User GTS (T5+), use the old 2052 /* If we don't have access to the new User GTS (T5+), use the old
2007 * doorbell mechanism; otherwise use the new BAR2 mechanism. 2053 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2008 */ 2054 */
2009 if (unlikely(q->bar2_addr == NULL)) { 2055 if (unlikely(q->bar2_addr == NULL)) {
2010 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), 2056 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
2011 val | INGRESSQID((u32)q->cntxt_id)); 2057 val | INGRESSQID_V((u32)q->cntxt_id));
2012 } else { 2058 } else {
2013 writel(val | INGRESSQID(q->bar2_qid), 2059 writel(val | INGRESSQID_V(q->bar2_qid),
2014 q->bar2_addr + SGE_UDB_GTS); 2060 q->bar2_addr + SGE_UDB_GTS);
2015 wmb(); 2061 wmb();
2016 } 2062 }
2063 cxgb_poll_unlock_napi(q);
2017 return work_done; 2064 return work_done;
2018} 2065}
2019 2066
@@ -2056,16 +2103,16 @@ static unsigned int process_intrq(struct adapter *adap)
2056 rspq_next(q); 2103 rspq_next(q);
2057 } 2104 }
2058 2105
2059 val = CIDXINC(credits) | SEINTARM(q->intr_params); 2106 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
2060 2107
2061 /* If we don't have access to the new User GTS (T5+), use the old 2108 /* If we don't have access to the new User GTS (T5+), use the old
2062 * doorbell mechanism; otherwise use the new BAR2 mechanism. 2109 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2063 */ 2110 */
2064 if (unlikely(q->bar2_addr == NULL)) { 2111 if (unlikely(q->bar2_addr == NULL)) {
2065 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), 2112 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
2066 val | INGRESSQID(q->cntxt_id)); 2113 val | INGRESSQID_V(q->cntxt_id));
2067 } else { 2114 } else {
2068 writel(val | INGRESSQID(q->bar2_qid), 2115 writel(val | INGRESSQID_V(q->bar2_qid),
2069 q->bar2_addr + SGE_UDB_GTS); 2116 q->bar2_addr + SGE_UDB_GTS);
2070 wmb(); 2117 wmb();
2071 } 2118 }
@@ -2095,7 +2142,7 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie)
2095{ 2142{
2096 struct adapter *adap = cookie; 2143 struct adapter *adap = cookie;
2097 2144
2098 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0); 2145 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
2099 if (t4_slow_intr_handler(adap) | process_intrq(adap)) 2146 if (t4_slow_intr_handler(adap) | process_intrq(adap))
2100 return IRQ_HANDLED; 2147 return IRQ_HANDLED;
2101 return IRQ_NONE; /* probably shared interrupt */ 2148 return IRQ_NONE; /* probably shared interrupt */
@@ -2142,9 +2189,9 @@ static void sge_rx_timer_cb(unsigned long data)
2142 } 2189 }
2143 } 2190 }
2144 2191
2145 t4_write_reg(adap, SGE_DEBUG_INDEX, 13); 2192 t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
2146 idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH); 2193 idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
2147 idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); 2194 idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2148 2195
2149 for (i = 0; i < 2; i++) { 2196 for (i = 0; i < 2; i++) {
2150 u32 debug0, debug11; 2197 u32 debug0, debug11;
@@ -2188,12 +2235,12 @@ static void sge_rx_timer_cb(unsigned long data)
2188 /* Read and save the SGE IDMA State and Queue ID information. 2235 /* Read and save the SGE IDMA State and Queue ID information.
2189 * We do this every time in case it changes across time ... 2236 * We do this every time in case it changes across time ...
2190 */ 2237 */
2191 t4_write_reg(adap, SGE_DEBUG_INDEX, 0); 2238 t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
2192 debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); 2239 debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2193 s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f; 2240 s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
2194 2241
2195 t4_write_reg(adap, SGE_DEBUG_INDEX, 11); 2242 t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
2196 debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); 2243 debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2197 s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff; 2244 s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
2198 2245
2199 CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n", 2246 CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
@@ -2337,6 +2384,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2337 goto err; 2384 goto err;
2338 2385
2339 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); 2386 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2387 napi_hash_add(&iq->napi);
2340 iq->cur_desc = iq->desc; 2388 iq->cur_desc = iq->desc;
2341 iq->cidx = 0; 2389 iq->cidx = 0;
2342 iq->gen = 1; 2390 iq->gen = 1;
@@ -2594,6 +2642,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2594 rq->cntxt_id, fl_id, 0xffff); 2642 rq->cntxt_id, fl_id, 0xffff);
2595 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, 2643 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2596 rq->desc, rq->phys_addr); 2644 rq->desc, rq->phys_addr);
2645 napi_hash_del(&rq->napi);
2597 netif_napi_del(&rq->napi); 2646 netif_napi_del(&rq->napi);
2598 rq->netdev = NULL; 2647 rq->netdev = NULL;
2599 rq->cntxt_id = rq->abs_id = 0; 2648 rq->cntxt_id = rq->abs_id = 0;
@@ -2738,24 +2787,11 @@ void t4_sge_stop(struct adapter *adap)
2738} 2787}
2739 2788
2740/** 2789/**
2741 * t4_sge_init - initialize SGE 2790 * t4_sge_init_soft - grab core SGE values needed by SGE code
2742 * @adap: the adapter 2791 * @adap: the adapter
2743 * 2792 *
2744 * Performs SGE initialization needed every time after a chip reset. 2793 * We need to grab the SGE operating parameters that we need to have
2745 * We do not initialize any of the queues here, instead the driver 2794 * in order to do our job and make sure we can live with them.
2746 * top-level must request them individually.
2747 *
2748 * Called in two different modes:
2749 *
2750 * 1. Perform actual hardware initialization and record hard-coded
2751 * parameters which were used. This gets used when we're the
2752 * Master PF and the Firmware Configuration File support didn't
2753 * work for some reason.
2754 *
2755 * 2. We're not the Master PF or initialization was performed with
2756 * a Firmware Configuration File. In this case we need to grab
2757 * any of the SGE operating parameters that we need to have in
2758 * order to do our job and make sure we can live with them ...
2759 */ 2795 */
2760 2796
2761static int t4_sge_init_soft(struct adapter *adap) 2797static int t4_sge_init_soft(struct adapter *adap)
@@ -2770,8 +2806,8 @@ static int t4_sge_init_soft(struct adapter *adap)
2770 * process_responses() and that only packet data is going to the 2806 * process_responses() and that only packet data is going to the
2771 * Free Lists. 2807 * Free Lists.
2772 */ 2808 */
2773 if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) != 2809 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
2774 RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) { 2810 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
2775 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); 2811 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
2776 return -EINVAL; 2812 return -EINVAL;
2777 } 2813 }
@@ -2785,7 +2821,7 @@ static int t4_sge_init_soft(struct adapter *adap)
2785 * XXX meet our needs! 2821 * XXX meet our needs!
2786 */ 2822 */
2787 #define READ_FL_BUF(x) \ 2823 #define READ_FL_BUF(x) \
2788 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32)) 2824 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
2789 2825
2790 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); 2826 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
2791 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); 2827 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
@@ -2823,99 +2859,38 @@ static int t4_sge_init_soft(struct adapter *adap)
2823 * Retrieve our RX interrupt holdoff timer values and counter 2859 * Retrieve our RX interrupt holdoff timer values and counter
2824 * threshold values from the SGE parameters. 2860 * threshold values from the SGE parameters.
2825 */ 2861 */
2826 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1); 2862 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
2827 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3); 2863 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
2828 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5); 2864 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
2829 s->timer_val[0] = core_ticks_to_us(adap, 2865 s->timer_val[0] = core_ticks_to_us(adap,
2830 TIMERVALUE0_GET(timer_value_0_and_1)); 2866 TIMERVALUE0_G(timer_value_0_and_1));
2831 s->timer_val[1] = core_ticks_to_us(adap, 2867 s->timer_val[1] = core_ticks_to_us(adap,
2832 TIMERVALUE1_GET(timer_value_0_and_1)); 2868 TIMERVALUE1_G(timer_value_0_and_1));
2833 s->timer_val[2] = core_ticks_to_us(adap, 2869 s->timer_val[2] = core_ticks_to_us(adap,
2834 TIMERVALUE2_GET(timer_value_2_and_3)); 2870 TIMERVALUE2_G(timer_value_2_and_3));
2835 s->timer_val[3] = core_ticks_to_us(adap, 2871 s->timer_val[3] = core_ticks_to_us(adap,
2836 TIMERVALUE3_GET(timer_value_2_and_3)); 2872 TIMERVALUE3_G(timer_value_2_and_3));
2837 s->timer_val[4] = core_ticks_to_us(adap, 2873 s->timer_val[4] = core_ticks_to_us(adap,
2838 TIMERVALUE4_GET(timer_value_4_and_5)); 2874 TIMERVALUE4_G(timer_value_4_and_5));
2839 s->timer_val[5] = core_ticks_to_us(adap, 2875 s->timer_val[5] = core_ticks_to_us(adap,
2840 TIMERVALUE5_GET(timer_value_4_and_5)); 2876 TIMERVALUE5_G(timer_value_4_and_5));
2841 2877
2842 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD); 2878 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
2843 s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold); 2879 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
2844 s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold); 2880 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
2845 s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold); 2881 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
2846 s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold); 2882 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
2847
2848 return 0;
2849}
2850
2851static int t4_sge_init_hard(struct adapter *adap)
2852{
2853 struct sge *s = &adap->sge;
2854
2855 /*
2856 * Set up our basic SGE mode to deliver CPL messages to our Ingress
2857 * Queue and Packet Date to the Free List.
2858 */
2859 t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
2860 RXPKTCPLMODE_MASK);
2861
2862 /*
2863 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
2864 * and generate an interrupt when this occurs so we can recover.
2865 */
2866 if (is_t4(adap->params.chip)) {
2867 t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2868 V_HP_INT_THRESH(M_HP_INT_THRESH) |
2869 V_LP_INT_THRESH(M_LP_INT_THRESH),
2870 V_HP_INT_THRESH(dbfifo_int_thresh) |
2871 V_LP_INT_THRESH(dbfifo_int_thresh));
2872 } else {
2873 t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2874 V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
2875 V_LP_INT_THRESH_T5(dbfifo_int_thresh));
2876 t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
2877 V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
2878 V_HP_INT_THRESH_T5(dbfifo_int_thresh));
2879 }
2880 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
2881 F_ENABLE_DROP);
2882
2883 /*
2884 * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
2885 * t4_fixup_host_params().
2886 */
2887 s->fl_pg_order = FL_PG_ORDER;
2888 if (s->fl_pg_order)
2889 t4_write_reg(adap,
2890 SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
2891 PAGE_SIZE << FL_PG_ORDER);
2892 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
2893 FL_MTU_SMALL_BUFSIZE(adap));
2894 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
2895 FL_MTU_LARGE_BUFSIZE(adap));
2896
2897 /*
2898 * Note that the SGE Ingress Packet Count Interrupt Threshold and
2899 * Timer Holdoff values must be supplied by our caller.
2900 */
2901 t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
2902 THRESHOLD_0(s->counter_val[0]) |
2903 THRESHOLD_1(s->counter_val[1]) |
2904 THRESHOLD_2(s->counter_val[2]) |
2905 THRESHOLD_3(s->counter_val[3]));
2906 t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
2907 TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
2908 TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
2909 t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
2910 TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
2911 TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
2912 t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
2913 TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
2914 TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
2915 2883
2916 return 0; 2884 return 0;
2917} 2885}
2918 2886
2887/**
2888 * t4_sge_init - initialize SGE
2889 * @adap: the adapter
2890 *
2891 * Perform low-level SGE code initialization needed every time after a
2892 * chip reset.
2893 */
2919int t4_sge_init(struct adapter *adap) 2894int t4_sge_init(struct adapter *adap)
2920{ 2895{
2921 struct sge *s = &adap->sge; 2896 struct sge *s = &adap->sge;
@@ -2927,9 +2902,9 @@ int t4_sge_init(struct adapter *adap)
2927 * Ingress Padding Boundary and Egress Status Page Size are set up by 2902 * Ingress Padding Boundary and Egress Status Page Size are set up by
2928 * t4_fixup_host_params(). 2903 * t4_fixup_host_params().
2929 */ 2904 */
2930 sge_control = t4_read_reg(adap, SGE_CONTROL); 2905 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
2931 s->pktshift = PKTSHIFT_GET(sge_control); 2906 s->pktshift = PKTSHIFT_G(sge_control);
2932 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64; 2907 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
2933 2908
2934 /* T4 uses a single control field to specify both the PCIe Padding and 2909 /* T4 uses a single control field to specify both the PCIe Padding and
2935 * Packing Boundary. T5 introduced the ability to specify these 2910 * Packing Boundary. T5 introduced the ability to specify these
@@ -2937,8 +2912,8 @@ int t4_sge_init(struct adapter *adap)
2937 * within Packed Buffer Mode is the maximum of these two 2912 * within Packed Buffer Mode is the maximum of these two
2938 * specifications. 2913 * specifications.
2939 */ 2914 */
2940 ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) + 2915 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
2941 X_INGPADBOUNDARY_SHIFT); 2916 INGPADBOUNDARY_SHIFT_X);
2942 if (is_t4(adap->params.chip)) { 2917 if (is_t4(adap->params.chip)) {
2943 s->fl_align = ingpadboundary; 2918 s->fl_align = ingpadboundary;
2944 } else { 2919 } else {
@@ -2956,10 +2931,7 @@ int t4_sge_init(struct adapter *adap)
2956 s->fl_align = max(ingpadboundary, ingpackboundary); 2931 s->fl_align = max(ingpadboundary, ingpackboundary);
2957 } 2932 }
2958 2933
2959 if (adap->flags & USING_SOFT_PARAMS) 2934 ret = t4_sge_init_soft(adap);
2960 ret = t4_sge_init_soft(adap);
2961 else
2962 ret = t4_sge_init_hard(adap);
2963 if (ret < 0) 2935 if (ret < 0)
2964 return ret; 2936 return ret;
2965 2937
@@ -2975,11 +2947,11 @@ int t4_sge_init(struct adapter *adap)
2975 * buffers and a new field which only applies to Packed Mode Free List 2947 * buffers and a new field which only applies to Packed Mode Free List
2976 * buffers. 2948 * buffers.
2977 */ 2949 */
2978 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL); 2950 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
2979 if (is_t4(adap->params.chip)) 2951 if (is_t4(adap->params.chip))
2980 egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl); 2952 egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
2981 else 2953 else
2982 egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl); 2954 egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
2983 s->fl_starve_thres = 2*egress_threshold + 1; 2955 s->fl_starve_thres = 2*egress_threshold + 1;
2984 2956
2985 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); 2957 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index c132d9030729..4d643b65265e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include "cxgb4.h" 36#include "cxgb4.h"
37#include "t4_regs.h" 37#include "t4_regs.h"
38#include "t4_values.h"
38#include "t4fw_api.h" 39#include "t4fw_api.h"
39 40
40/** 41/**
@@ -149,20 +150,20 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
149 */ 150 */
150void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val) 151void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
151{ 152{
152 u32 req = ENABLE | FUNCTION(adap->fn) | reg; 153 u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);
153 154
154 if (is_t4(adap->params.chip)) 155 if (is_t4(adap->params.chip))
155 req |= F_LOCALCFG; 156 req |= LOCALCFG_F;
156 157
157 t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req); 158 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
158 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA); 159 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
159 160
160 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a 161 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
161 * Configuration Space read. (None of the other fields matter when 162 * Configuration Space read. (None of the other fields matter when
162 * ENABLE is 0 so a simple register write is easier than a 163 * ENABLE is 0 so a simple register write is easier than a
163 * read-modify-write via t4_set_reg_field().) 164 * read-modify-write via t4_set_reg_field().)
164 */ 165 */
165 t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0); 166 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
166} 167}
167 168
168/* 169/*
@@ -187,8 +188,8 @@ static void t4_report_fw_error(struct adapter *adap)
187 }; 188 };
188 u32 pcie_fw; 189 u32 pcie_fw;
189 190
190 pcie_fw = t4_read_reg(adap, MA_PCIE_FW); 191 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
191 if (pcie_fw & PCIE_FW_ERR) 192 if (pcie_fw & PCIE_FW_ERR_F)
192 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", 193 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
193 reason[PCIE_FW_EVAL_G(pcie_fw)]); 194 reason[PCIE_FW_EVAL_G(pcie_fw)]);
194} 195}
@@ -264,8 +265,8 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
264 u64 res; 265 u64 res;
265 int i, ms, delay_idx; 266 int i, ms, delay_idx;
266 const __be64 *p = cmd; 267 const __be64 *p = cmd;
267 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA); 268 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
268 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL); 269 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
269 270
270 if ((size & 15) || size > MBOX_LEN) 271 if ((size & 15) || size > MBOX_LEN)
271 return -EINVAL; 272 return -EINVAL;
@@ -277,9 +278,9 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
277 if (adap->pdev->error_state != pci_channel_io_normal) 278 if (adap->pdev->error_state != pci_channel_io_normal)
278 return -EIO; 279 return -EIO;
279 280
280 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); 281 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
281 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) 282 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
282 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); 283 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
283 284
284 if (v != MBOX_OWNER_DRV) 285 if (v != MBOX_OWNER_DRV)
285 return v ? -EBUSY : -ETIMEDOUT; 286 return v ? -EBUSY : -ETIMEDOUT;
@@ -287,7 +288,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
287 for (i = 0; i < size; i += 8) 288 for (i = 0; i < size; i += 8)
288 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); 289 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
289 290
290 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); 291 t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
291 t4_read_reg(adap, ctl_reg); /* flush write */ 292 t4_read_reg(adap, ctl_reg); /* flush write */
292 293
293 delay_idx = 0; 294 delay_idx = 0;
@@ -303,8 +304,8 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
303 mdelay(ms); 304 mdelay(ms);
304 305
305 v = t4_read_reg(adap, ctl_reg); 306 v = t4_read_reg(adap, ctl_reg);
306 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) { 307 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
307 if (!(v & MBMSGVALID)) { 308 if (!(v & MBMSGVALID_F)) {
308 t4_write_reg(adap, ctl_reg, 0); 309 t4_write_reg(adap, ctl_reg, 0);
309 continue; 310 continue;
310 } 311 }
@@ -350,27 +351,27 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
350 u32 mc_bist_status_rdata, mc_bist_data_pattern; 351 u32 mc_bist_status_rdata, mc_bist_data_pattern;
351 352
352 if (is_t4(adap->params.chip)) { 353 if (is_t4(adap->params.chip)) {
353 mc_bist_cmd = MC_BIST_CMD; 354 mc_bist_cmd = MC_BIST_CMD_A;
354 mc_bist_cmd_addr = MC_BIST_CMD_ADDR; 355 mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A;
355 mc_bist_cmd_len = MC_BIST_CMD_LEN; 356 mc_bist_cmd_len = MC_BIST_CMD_LEN_A;
356 mc_bist_status_rdata = MC_BIST_STATUS_RDATA; 357 mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A;
357 mc_bist_data_pattern = MC_BIST_DATA_PATTERN; 358 mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A;
358 } else { 359 } else {
359 mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx); 360 mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx);
360 mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx); 361 mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
361 mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx); 362 mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
362 mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx); 363 mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
363 mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx); 364 mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
364 } 365 }
365 366
366 if (t4_read_reg(adap, mc_bist_cmd) & START_BIST) 367 if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F)
367 return -EBUSY; 368 return -EBUSY;
368 t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU); 369 t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
369 t4_write_reg(adap, mc_bist_cmd_len, 64); 370 t4_write_reg(adap, mc_bist_cmd_len, 64);
370 t4_write_reg(adap, mc_bist_data_pattern, 0xc); 371 t4_write_reg(adap, mc_bist_data_pattern, 0xc);
371 t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST | 372 t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F |
372 BIST_CMD_GAP(1)); 373 BIST_CMD_GAP_V(1));
373 i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1); 374 i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1);
374 if (i) 375 if (i)
375 return i; 376 return i;
376 377
@@ -403,31 +404,31 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
403 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata; 404 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
404 405
405 if (is_t4(adap->params.chip)) { 406 if (is_t4(adap->params.chip)) {
406 edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx); 407 edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx);
407 edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx); 408 edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx);
408 edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx); 409 edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx);
409 edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN, 410 edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A,
410 idx);
411 edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
412 idx); 411 idx);
412 edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A,
413 idx);
413 } else { 414 } else {
414 edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx); 415 edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
415 edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx); 416 edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
416 edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx); 417 edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
417 edc_bist_cmd_data_pattern = 418 edc_bist_cmd_data_pattern =
418 EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx); 419 EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
419 edc_bist_status_rdata = 420 edc_bist_status_rdata =
420 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx); 421 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
421 } 422 }
422 423
423 if (t4_read_reg(adap, edc_bist_cmd) & START_BIST) 424 if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F)
424 return -EBUSY; 425 return -EBUSY;
425 t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU); 426 t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
426 t4_write_reg(adap, edc_bist_cmd_len, 64); 427 t4_write_reg(adap, edc_bist_cmd_len, 64);
427 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc); 428 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
428 t4_write_reg(adap, edc_bist_cmd, 429 t4_write_reg(adap, edc_bist_cmd,
429 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); 430 BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F);
430 i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1); 431 i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1);
431 if (i) 432 if (i)
432 return i; 433 return i;
433 434
@@ -505,13 +506,13 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
505 * the address is relative to BAR0. 506 * the address is relative to BAR0.
506 */ 507 */
507 mem_reg = t4_read_reg(adap, 508 mem_reg = t4_read_reg(adap,
508 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 509 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
509 win)); 510 win));
510 mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10); 511 mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
511 mem_base = GET_PCIEOFST(mem_reg) << 10; 512 mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
512 if (is_t4(adap->params.chip)) 513 if (is_t4(adap->params.chip))
513 mem_base -= adap->t4_bar0; 514 mem_base -= adap->t4_bar0;
514 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn); 515 win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);
515 516
516 /* Calculate our initial PCI-E Memory Window Position and Offset into 517 /* Calculate our initial PCI-E Memory Window Position and Offset into
517 * that Window. 518 * that Window.
@@ -524,10 +525,10 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
524 * attempt to use the new value.) 525 * attempt to use the new value.)
525 */ 526 */
526 t4_write_reg(adap, 527 t4_write_reg(adap,
527 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win), 528 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
528 pos | win_pf); 529 pos | win_pf);
529 t4_read_reg(adap, 530 t4_read_reg(adap,
530 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win)); 531 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
531 532
532 /* Transfer data to/from the adapter as long as there's an integral 533 /* Transfer data to/from the adapter as long as there's an integral
533 * number of 32-bit transfers to complete. 534 * number of 32-bit transfers to complete.
@@ -552,11 +553,11 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
552 pos += mem_aperture; 553 pos += mem_aperture;
553 offset = 0; 554 offset = 0;
554 t4_write_reg(adap, 555 t4_write_reg(adap,
555 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 556 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
556 win), pos | win_pf); 557 win), pos | win_pf);
557 t4_read_reg(adap, 558 t4_read_reg(adap,
558 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 559 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
559 win)); 560 win));
560 } 561 }
561 } 562 }
562 563
@@ -760,14 +761,13 @@ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
760 761
761 if (!byte_cnt || byte_cnt > 4) 762 if (!byte_cnt || byte_cnt > 4)
762 return -EINVAL; 763 return -EINVAL;
763 if (t4_read_reg(adapter, SF_OP) & SF_BUSY) 764 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
764 return -EBUSY; 765 return -EBUSY;
765 cont = cont ? SF_CONT : 0; 766 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
766 lock = lock ? SF_LOCK : 0; 767 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
767 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1)); 768 ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
768 ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
769 if (!ret) 769 if (!ret)
770 *valp = t4_read_reg(adapter, SF_DATA); 770 *valp = t4_read_reg(adapter, SF_DATA_A);
771 return ret; 771 return ret;
772} 772}
773 773
@@ -788,14 +788,12 @@ static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
788{ 788{
789 if (!byte_cnt || byte_cnt > 4) 789 if (!byte_cnt || byte_cnt > 4)
790 return -EINVAL; 790 return -EINVAL;
791 if (t4_read_reg(adapter, SF_OP) & SF_BUSY) 791 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
792 return -EBUSY; 792 return -EBUSY;
793 cont = cont ? SF_CONT : 0; 793 t4_write_reg(adapter, SF_DATA_A, val);
794 lock = lock ? SF_LOCK : 0; 794 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
795 t4_write_reg(adapter, SF_DATA, val); 795 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
796 t4_write_reg(adapter, SF_OP, lock | 796 return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
797 cont | BYTECNT(byte_cnt - 1) | OP_WR);
798 return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
799} 797}
800 798
801/** 799/**
@@ -837,8 +835,8 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
837 * (i.e., big-endian), otherwise as 32-bit words in the platform's 835 * (i.e., big-endian), otherwise as 32-bit words in the platform's
838 * natural endianess. 836 * natural endianess.
839 */ 837 */
840static int t4_read_flash(struct adapter *adapter, unsigned int addr, 838int t4_read_flash(struct adapter *adapter, unsigned int addr,
841 unsigned int nwords, u32 *data, int byte_oriented) 839 unsigned int nwords, u32 *data, int byte_oriented)
842{ 840{
843 int ret; 841 int ret;
844 842
@@ -854,7 +852,7 @@ static int t4_read_flash(struct adapter *adapter, unsigned int addr,
854 for ( ; nwords; nwords--, data++) { 852 for ( ; nwords; nwords--, data++) {
855 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); 853 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
856 if (nwords == 1) 854 if (nwords == 1)
857 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ 855 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
858 if (ret) 856 if (ret)
859 return ret; 857 return ret;
860 if (byte_oriented) 858 if (byte_oriented)
@@ -902,7 +900,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
902 if (ret) 900 if (ret)
903 goto unlock; 901 goto unlock;
904 902
905 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ 903 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
906 904
907 /* Read the page to verify the write succeeded */ 905 /* Read the page to verify the write succeeded */
908 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); 906 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
@@ -918,7 +916,7 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
918 return 0; 916 return 0;
919 917
920unlock: 918unlock:
921 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ 919 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
922 return ret; 920 return ret;
923} 921}
924 922
@@ -950,6 +948,43 @@ int t4_get_tp_version(struct adapter *adapter, u32 *vers)
950 1, vers, 0); 948 1, vers, 0);
951} 949}
952 950
951/**
952 * t4_get_exprom_version - return the Expansion ROM version (if any)
953 * @adapter: the adapter
954 * @vers: where to place the version
955 *
956 * Reads the Expansion ROM header from FLASH and returns the version
957 * number (if present) through the @vers return value pointer. We return
958 * this in the Firmware Version Format since it's convenient. Return
959 * 0 on success, -ENOENT if no Expansion ROM is present.
960 */
961int t4_get_exprom_version(struct adapter *adap, u32 *vers)
962{
963 struct exprom_header {
964 unsigned char hdr_arr[16]; /* must start with 0x55aa */
965 unsigned char hdr_ver[4]; /* Expansion ROM version */
966 } *hdr;
967 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
968 sizeof(u32))];
969 int ret;
970
971 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
972 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
973 0);
974 if (ret)
975 return ret;
976
977 hdr = (struct exprom_header *)exprom_header_buf;
978 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
979 return -ENOENT;
980
981 *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
982 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
983 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
984 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
985 return 0;
986}
987
953/* Is the given firmware API compatible with the one the driver was compiled 988/* Is the given firmware API compatible with the one the driver was compiled
954 * with? 989 * with?
955 */ 990 */
@@ -1113,7 +1148,7 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
1113 } 1148 }
1114 start++; 1149 start++;
1115 } 1150 }
1116 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ 1151 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
1117 return ret; 1152 return ret;
1118} 1153}
1119 1154
@@ -1241,6 +1276,45 @@ out:
1241 return ret; 1276 return ret;
1242} 1277}
1243 1278
1279/**
1280 * t4_fwcache - firmware cache operation
1281 * @adap: the adapter
1282 * @op : the operation (flush or flush and invalidate)
1283 */
1284int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
1285{
1286 struct fw_params_cmd c;
1287
1288 memset(&c, 0, sizeof(c));
1289 c.op_to_vfn =
1290 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
1291 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
1292 FW_PARAMS_CMD_PFN_V(adap->fn) |
1293 FW_PARAMS_CMD_VFN_V(0));
1294 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
1295 c.param[0].mnem =
1296 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1297 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
1298 c.param[0].val = (__force __be32)op;
1299
1300 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
1301}
1302
1303void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
1304{
1305 unsigned int i, j;
1306
1307 for (i = 0; i < 8; i++) {
1308 u32 *p = la_buf + i;
1309
1310 t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
1311 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
1312 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
1313 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
1314 *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
1315 }
1316}
1317
1244#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ 1318#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1245 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ 1319 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1246 FW_PORT_CAP_ANEG) 1320 FW_PORT_CAP_ANEG)
@@ -1365,95 +1439,97 @@ static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1365static void pcie_intr_handler(struct adapter *adapter) 1439static void pcie_intr_handler(struct adapter *adapter)
1366{ 1440{
1367 static const struct intr_info sysbus_intr_info[] = { 1441 static const struct intr_info sysbus_intr_info[] = {
1368 { RNPP, "RXNP array parity error", -1, 1 }, 1442 { RNPP_F, "RXNP array parity error", -1, 1 },
1369 { RPCP, "RXPC array parity error", -1, 1 }, 1443 { RPCP_F, "RXPC array parity error", -1, 1 },
1370 { RCIP, "RXCIF array parity error", -1, 1 }, 1444 { RCIP_F, "RXCIF array parity error", -1, 1 },
1371 { RCCP, "Rx completions control array parity error", -1, 1 }, 1445 { RCCP_F, "Rx completions control array parity error", -1, 1 },
1372 { RFTP, "RXFT array parity error", -1, 1 }, 1446 { RFTP_F, "RXFT array parity error", -1, 1 },
1373 { 0 } 1447 { 0 }
1374 }; 1448 };
1375 static const struct intr_info pcie_port_intr_info[] = { 1449 static const struct intr_info pcie_port_intr_info[] = {
1376 { TPCP, "TXPC array parity error", -1, 1 }, 1450 { TPCP_F, "TXPC array parity error", -1, 1 },
1377 { TNPP, "TXNP array parity error", -1, 1 }, 1451 { TNPP_F, "TXNP array parity error", -1, 1 },
1378 { TFTP, "TXFT array parity error", -1, 1 }, 1452 { TFTP_F, "TXFT array parity error", -1, 1 },
1379 { TCAP, "TXCA array parity error", -1, 1 }, 1453 { TCAP_F, "TXCA array parity error", -1, 1 },
1380 { TCIP, "TXCIF array parity error", -1, 1 }, 1454 { TCIP_F, "TXCIF array parity error", -1, 1 },
1381 { RCAP, "RXCA array parity error", -1, 1 }, 1455 { RCAP_F, "RXCA array parity error", -1, 1 },
1382 { OTDD, "outbound request TLP discarded", -1, 1 }, 1456 { OTDD_F, "outbound request TLP discarded", -1, 1 },
1383 { RDPE, "Rx data parity error", -1, 1 }, 1457 { RDPE_F, "Rx data parity error", -1, 1 },
1384 { TDUE, "Tx uncorrectable data error", -1, 1 }, 1458 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
1385 { 0 } 1459 { 0 }
1386 }; 1460 };
1387 static const struct intr_info pcie_intr_info[] = { 1461 static const struct intr_info pcie_intr_info[] = {
1388 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, 1462 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
1389 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, 1463 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
1390 { MSIDATAPERR, "MSI data parity error", -1, 1 }, 1464 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
1391 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, 1465 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
1392 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, 1466 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
1393 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, 1467 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
1394 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, 1468 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
1395 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, 1469 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
1396 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, 1470 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
1397 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, 1471 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
1398 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, 1472 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
1399 { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, 1473 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
1400 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, 1474 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
1401 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, 1475 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
1402 { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, 1476 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
1403 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, 1477 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
1404 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, 1478 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
1405 { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, 1479 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
1406 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, 1480 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
1407 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, 1481 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
1408 { FIDPERR, "PCI FID parity error", -1, 1 }, 1482 { FIDPERR_F, "PCI FID parity error", -1, 1 },
1409 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, 1483 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
1410 { MATAGPERR, "PCI MA tag parity error", -1, 1 }, 1484 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
1411 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, 1485 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
1412 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, 1486 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
1413 { RXWRPERR, "PCI Rx write parity error", -1, 1 }, 1487 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
1414 { RPLPERR, "PCI replay buffer parity error", -1, 1 }, 1488 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
1415 { PCIESINT, "PCI core secondary fault", -1, 1 }, 1489 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
1416 { PCIEPINT, "PCI core primary fault", -1, 1 }, 1490 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
1417 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 }, 1491 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
1492 -1, 0 },
1418 { 0 } 1493 { 0 }
1419 }; 1494 };
1420 1495
1421 static struct intr_info t5_pcie_intr_info[] = { 1496 static struct intr_info t5_pcie_intr_info[] = {
1422 { MSTGRPPERR, "Master Response Read Queue parity error", 1497 { MSTGRPPERR_F, "Master Response Read Queue parity error",
1423 -1, 1 }, 1498 -1, 1 },
1424 { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 }, 1499 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
1425 { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 }, 1500 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
1426 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, 1501 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
1427 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, 1502 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
1428 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, 1503 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
1429 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, 1504 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
1430 { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error", 1505 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
1431 -1, 1 }, 1506 -1, 1 },
1432 { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error", 1507 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
1433 -1, 1 }, 1508 -1, 1 },
1434 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, 1509 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
1435 { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 }, 1510 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
1436 { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, 1511 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
1437 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, 1512 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
1438 { DREQWRPERR, "PCI DMA channel write request parity error", 1513 { DREQWRPERR_F, "PCI DMA channel write request parity error",
1439 -1, 1 }, 1514 -1, 1 },
1440 { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, 1515 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
1441 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, 1516 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
1442 { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 }, 1517 { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
1443 { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, 1518 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
1444 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, 1519 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
1445 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, 1520 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
1446 { FIDPERR, "PCI FID parity error", -1, 1 }, 1521 { FIDPERR_F, "PCI FID parity error", -1, 1 },
1447 { VFIDPERR, "PCI INTx clear parity error", -1, 1 }, 1522 { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
1448 { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 }, 1523 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
1449 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, 1524 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
1450 { IPRXHDRGRPPERR, "PCI IP Rx header group parity error", 1525 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
1451 -1, 1 }, 1526 -1, 1 },
1452 { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 }, 1527 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
1453 { RPLPERR, "PCI IP replay buffer parity error", -1, 1 }, 1528 -1, 1 },
1454 { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 }, 1529 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
1455 { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 }, 1530 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
1456 { READRSPERR, "Outbound read error", -1, 0 }, 1531 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1532 { READRSPERR_F, "Outbound read error", -1, 0 },
1457 { 0 } 1533 { 0 }
1458 }; 1534 };
1459 1535
@@ -1461,15 +1537,15 @@ static void pcie_intr_handler(struct adapter *adapter)
1461 1537
1462 if (is_t4(adapter->params.chip)) 1538 if (is_t4(adapter->params.chip))
1463 fat = t4_handle_intr_status(adapter, 1539 fat = t4_handle_intr_status(adapter,
1464 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, 1540 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
1465 sysbus_intr_info) + 1541 sysbus_intr_info) +
1466 t4_handle_intr_status(adapter, 1542 t4_handle_intr_status(adapter,
1467 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 1543 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
1468 pcie_port_intr_info) + 1544 pcie_port_intr_info) +
1469 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, 1545 t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
1470 pcie_intr_info); 1546 pcie_intr_info);
1471 else 1547 else
1472 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE, 1548 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
1473 t5_pcie_intr_info); 1549 t5_pcie_intr_info);
1474 1550
1475 if (fat) 1551 if (fat)
@@ -1483,11 +1559,11 @@ static void tp_intr_handler(struct adapter *adapter)
1483{ 1559{
1484 static const struct intr_info tp_intr_info[] = { 1560 static const struct intr_info tp_intr_info[] = {
1485 { 0x3fffffff, "TP parity error", -1, 1 }, 1561 { 0x3fffffff, "TP parity error", -1, 1 },
1486 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, 1562 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
1487 { 0 } 1563 { 0 }
1488 }; 1564 };
1489 1565
1490 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info)) 1566 if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
1491 t4_fatal_err(adapter); 1567 t4_fatal_err(adapter);
1492} 1568}
1493 1569
@@ -1499,102 +1575,107 @@ static void sge_intr_handler(struct adapter *adapter)
1499 u64 v; 1575 u64 v;
1500 1576
1501 static const struct intr_info sge_intr_info[] = { 1577 static const struct intr_info sge_intr_info[] = {
1502 { ERR_CPL_EXCEED_IQE_SIZE, 1578 { ERR_CPL_EXCEED_IQE_SIZE_F,
1503 "SGE received CPL exceeding IQE size", -1, 1 }, 1579 "SGE received CPL exceeding IQE size", -1, 1 },
1504 { ERR_INVALID_CIDX_INC, 1580 { ERR_INVALID_CIDX_INC_F,
1505 "SGE GTS CIDX increment too large", -1, 0 }, 1581 "SGE GTS CIDX increment too large", -1, 0 },
1506 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, 1582 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
1507 { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full }, 1583 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
1508 { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full }, 1584 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
1509 { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped }, 1585 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
1510 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, 1586 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
1511 "SGE IQID > 1023 received CPL for FL", -1, 0 }, 1587 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1512 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, 1588 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
1513 0 }, 1589 0 },
1514 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, 1590 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
1515 0 }, 1591 0 },
1516 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, 1592 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
1517 0 }, 1593 0 },
1518 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, 1594 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
1519 0 }, 1595 0 },
1520 { ERR_ING_CTXT_PRIO, 1596 { ERR_ING_CTXT_PRIO_F,
1521 "SGE too many priority ingress contexts", -1, 0 }, 1597 "SGE too many priority ingress contexts", -1, 0 },
1522 { ERR_EGR_CTXT_PRIO, 1598 { ERR_EGR_CTXT_PRIO_F,
1523 "SGE too many priority egress contexts", -1, 0 }, 1599 "SGE too many priority egress contexts", -1, 0 },
1524 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, 1600 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
1525 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, 1601 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
1526 { 0 } 1602 { 0 }
1527 }; 1603 };
1528 1604
1529 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) | 1605 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
1530 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32); 1606 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
1531 if (v) { 1607 if (v) {
1532 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", 1608 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1533 (unsigned long long)v); 1609 (unsigned long long)v);
1534 t4_write_reg(adapter, SGE_INT_CAUSE1, v); 1610 t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
1535 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32); 1611 t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
1536 } 1612 }
1537 1613
1538 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) || 1614 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
1539 v != 0) 1615 v != 0)
1540 t4_fatal_err(adapter); 1616 t4_fatal_err(adapter);
1541} 1617}
1542 1618
1619#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
1620 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
1621#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
1622 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
1623
1543/* 1624/*
1544 * CIM interrupt handler. 1625 * CIM interrupt handler.
1545 */ 1626 */
1546static void cim_intr_handler(struct adapter *adapter) 1627static void cim_intr_handler(struct adapter *adapter)
1547{ 1628{
1548 static const struct intr_info cim_intr_info[] = { 1629 static const struct intr_info cim_intr_info[] = {
1549 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, 1630 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
1550 { OBQPARERR, "CIM OBQ parity error", -1, 1 }, 1631 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
1551 { IBQPARERR, "CIM IBQ parity error", -1, 1 }, 1632 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
1552 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, 1633 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
1553 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, 1634 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
1554 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, 1635 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
1555 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, 1636 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
1556 { 0 } 1637 { 0 }
1557 }; 1638 };
1558 static const struct intr_info cim_upintr_info[] = { 1639 static const struct intr_info cim_upintr_info[] = {
1559 { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, 1640 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
1560 { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, 1641 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
1561 { ILLWRINT, "CIM illegal write", -1, 1 }, 1642 { ILLWRINT_F, "CIM illegal write", -1, 1 },
1562 { ILLRDINT, "CIM illegal read", -1, 1 }, 1643 { ILLRDINT_F, "CIM illegal read", -1, 1 },
1563 { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, 1644 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
1564 { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, 1645 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
1565 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, 1646 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
1566 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, 1647 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
1567 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, 1648 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
1568 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, 1649 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
1569 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, 1650 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
1570 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, 1651 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
1571 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, 1652 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
1572 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, 1653 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
1573 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, 1654 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
1574 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, 1655 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
1575 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, 1656 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
1576 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, 1657 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
1577 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, 1658 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
1578 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, 1659 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
1579 { SGLRDPLINT , "CIM single read from PL space", -1, 1 }, 1660 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
1580 { SGLWRPLINT , "CIM single write to PL space", -1, 1 }, 1661 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
1581 { BLKRDPLINT , "CIM block read from PL space", -1, 1 }, 1662 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
1582 { BLKWRPLINT , "CIM block write to PL space", -1, 1 }, 1663 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
1583 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, 1664 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
1584 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, 1665 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
1585 { TIMEOUTINT , "CIM PIF timeout", -1, 1 }, 1666 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
1586 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, 1667 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
1587 { 0 } 1668 { 0 }
1588 }; 1669 };
1589 1670
1590 int fat; 1671 int fat;
1591 1672
1592 if (t4_read_reg(adapter, MA_PCIE_FW) & PCIE_FW_ERR) 1673 if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
1593 t4_report_fw_error(adapter); 1674 t4_report_fw_error(adapter);
1594 1675
1595 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, 1676 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
1596 cim_intr_info) + 1677 cim_intr_info) +
1597 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, 1678 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
1598 cim_upintr_info); 1679 cim_upintr_info);
1599 if (fat) 1680 if (fat)
1600 t4_fatal_err(adapter); 1681 t4_fatal_err(adapter);
@@ -1611,7 +1692,7 @@ static void ulprx_intr_handler(struct adapter *adapter)
1611 { 0 } 1692 { 0 }
1612 }; 1693 };
1613 1694
1614 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info)) 1695 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
1615 t4_fatal_err(adapter); 1696 t4_fatal_err(adapter);
1616} 1697}
1617 1698
@@ -1621,19 +1702,19 @@ static void ulprx_intr_handler(struct adapter *adapter)
1621static void ulptx_intr_handler(struct adapter *adapter) 1702static void ulptx_intr_handler(struct adapter *adapter)
1622{ 1703{
1623 static const struct intr_info ulptx_intr_info[] = { 1704 static const struct intr_info ulptx_intr_info[] = {
1624 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 1705 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
1625 0 }, 1706 0 },
1626 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 1707 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
1627 0 }, 1708 0 },
1628 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, 1709 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
1629 0 }, 1710 0 },
1630 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, 1711 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
1631 0 }, 1712 0 },
1632 { 0xfffffff, "ULPTX parity error", -1, 1 }, 1713 { 0xfffffff, "ULPTX parity error", -1, 1 },
1633 { 0 } 1714 { 0 }
1634 }; 1715 };
1635 1716
1636 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info)) 1717 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
1637 t4_fatal_err(adapter); 1718 t4_fatal_err(adapter);
1638} 1719}
1639 1720
@@ -1643,19 +1724,20 @@ static void ulptx_intr_handler(struct adapter *adapter)
1643static void pmtx_intr_handler(struct adapter *adapter) 1724static void pmtx_intr_handler(struct adapter *adapter)
1644{ 1725{
1645 static const struct intr_info pmtx_intr_info[] = { 1726 static const struct intr_info pmtx_intr_info[] = {
1646 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, 1727 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
1647 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, 1728 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
1648 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, 1729 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
1649 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, 1730 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
1650 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 }, 1731 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
1651 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, 1732 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
1652 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 }, 1733 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
1653 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, 1734 -1, 1 },
1654 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, 1735 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
1736 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
1655 { 0 } 1737 { 0 }
1656 }; 1738 };
1657 1739
1658 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info)) 1740 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
1659 t4_fatal_err(adapter); 1741 t4_fatal_err(adapter);
1660} 1742}
1661 1743
@@ -1665,16 +1747,17 @@ static void pmtx_intr_handler(struct adapter *adapter)
1665static void pmrx_intr_handler(struct adapter *adapter) 1747static void pmrx_intr_handler(struct adapter *adapter)
1666{ 1748{
1667 static const struct intr_info pmrx_intr_info[] = { 1749 static const struct intr_info pmrx_intr_info[] = {
1668 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, 1750 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
1669 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 }, 1751 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
1670 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, 1752 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
1671 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 }, 1753 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
1672 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, 1754 -1, 1 },
1673 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, 1755 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
1756 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
1674 { 0 } 1757 { 0 }
1675 }; 1758 };
1676 1759
1677 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info)) 1760 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
1678 t4_fatal_err(adapter); 1761 t4_fatal_err(adapter);
1679} 1762}
1680 1763
@@ -1684,16 +1767,16 @@ static void pmrx_intr_handler(struct adapter *adapter)
1684static void cplsw_intr_handler(struct adapter *adapter) 1767static void cplsw_intr_handler(struct adapter *adapter)
1685{ 1768{
1686 static const struct intr_info cplsw_intr_info[] = { 1769 static const struct intr_info cplsw_intr_info[] = {
1687 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, 1770 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
1688 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, 1771 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
1689 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, 1772 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
1690 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, 1773 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
1691 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, 1774 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
1692 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, 1775 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
1693 { 0 } 1776 { 0 }
1694 }; 1777 };
1695 1778
1696 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info)) 1779 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
1697 t4_fatal_err(adapter); 1780 t4_fatal_err(adapter);
1698} 1781}
1699 1782
@@ -1703,15 +1786,15 @@ static void cplsw_intr_handler(struct adapter *adapter)
1703static void le_intr_handler(struct adapter *adap) 1786static void le_intr_handler(struct adapter *adap)
1704{ 1787{
1705 static const struct intr_info le_intr_info[] = { 1788 static const struct intr_info le_intr_info[] = {
1706 { LIPMISS, "LE LIP miss", -1, 0 }, 1789 { LIPMISS_F, "LE LIP miss", -1, 0 },
1707 { LIP0, "LE 0 LIP error", -1, 0 }, 1790 { LIP0_F, "LE 0 LIP error", -1, 0 },
1708 { PARITYERR, "LE parity error", -1, 1 }, 1791 { PARITYERR_F, "LE parity error", -1, 1 },
1709 { UNKNOWNCMD, "LE unknown command", -1, 1 }, 1792 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
1710 { REQQPARERR, "LE request queue parity error", -1, 1 }, 1793 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
1711 { 0 } 1794 { 0 }
1712 }; 1795 };
1713 1796
1714 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info)) 1797 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
1715 t4_fatal_err(adap); 1798 t4_fatal_err(adap);
1716} 1799}
1717 1800
@@ -1725,19 +1808,22 @@ static void mps_intr_handler(struct adapter *adapter)
1725 { 0 } 1808 { 0 }
1726 }; 1809 };
1727 static const struct intr_info mps_tx_intr_info[] = { 1810 static const struct intr_info mps_tx_intr_info[] = {
1728 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, 1811 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
1729 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 1812 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1730 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, 1813 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
1731 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, 1814 -1, 1 },
1732 { BUBBLE, "MPS Tx underflow", -1, 1 }, 1815 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
1733 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, 1816 -1, 1 },
1734 { FRMERR, "MPS Tx framing error", -1, 1 }, 1817 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
1818 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
1819 { FRMERR_F, "MPS Tx framing error", -1, 1 },
1735 { 0 } 1820 { 0 }
1736 }; 1821 };
1737 static const struct intr_info mps_trc_intr_info[] = { 1822 static const struct intr_info mps_trc_intr_info[] = {
1738 { FILTMEM, "MPS TRC filter parity error", -1, 1 }, 1823 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
1739 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, 1824 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
1740 { MISCPERR, "MPS TRC misc parity error", -1, 1 }, 1825 -1, 1 },
1826 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
1741 { 0 } 1827 { 0 }
1742 }; 1828 };
1743 static const struct intr_info mps_stat_sram_intr_info[] = { 1829 static const struct intr_info mps_stat_sram_intr_info[] = {
@@ -1753,37 +1839,37 @@ static void mps_intr_handler(struct adapter *adapter)
1753 { 0 } 1839 { 0 }
1754 }; 1840 };
1755 static const struct intr_info mps_cls_intr_info[] = { 1841 static const struct intr_info mps_cls_intr_info[] = {
1756 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, 1842 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
1757 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, 1843 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
1758 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, 1844 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
1759 { 0 } 1845 { 0 }
1760 }; 1846 };
1761 1847
1762 int fat; 1848 int fat;
1763 1849
1764 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE, 1850 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
1765 mps_rx_intr_info) + 1851 mps_rx_intr_info) +
1766 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE, 1852 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
1767 mps_tx_intr_info) + 1853 mps_tx_intr_info) +
1768 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE, 1854 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
1769 mps_trc_intr_info) + 1855 mps_trc_intr_info) +
1770 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM, 1856 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
1771 mps_stat_sram_intr_info) + 1857 mps_stat_sram_intr_info) +
1772 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, 1858 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
1773 mps_stat_tx_intr_info) + 1859 mps_stat_tx_intr_info) +
1774 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, 1860 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
1775 mps_stat_rx_intr_info) + 1861 mps_stat_rx_intr_info) +
1776 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE, 1862 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
1777 mps_cls_intr_info); 1863 mps_cls_intr_info);
1778 1864
1779 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT | 1865 t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
1780 RXINT | TXINT | STATINT); 1866 t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
1781 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1782 if (fat) 1867 if (fat)
1783 t4_fatal_err(adapter); 1868 t4_fatal_err(adapter);
1784} 1869}
1785 1870
1786#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) 1871#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
1872 ECC_UE_INT_CAUSE_F)
1787 1873
1788/* 1874/*
1789 * EDC/MC interrupt handler. 1875 * EDC/MC interrupt handler.
@@ -1795,40 +1881,40 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
1795 unsigned int addr, cnt_addr, v; 1881 unsigned int addr, cnt_addr, v;
1796 1882
1797 if (idx <= MEM_EDC1) { 1883 if (idx <= MEM_EDC1) {
1798 addr = EDC_REG(EDC_INT_CAUSE, idx); 1884 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
1799 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); 1885 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
1800 } else if (idx == MEM_MC) { 1886 } else if (idx == MEM_MC) {
1801 if (is_t4(adapter->params.chip)) { 1887 if (is_t4(adapter->params.chip)) {
1802 addr = MC_INT_CAUSE; 1888 addr = MC_INT_CAUSE_A;
1803 cnt_addr = MC_ECC_STATUS; 1889 cnt_addr = MC_ECC_STATUS_A;
1804 } else { 1890 } else {
1805 addr = MC_P_INT_CAUSE; 1891 addr = MC_P_INT_CAUSE_A;
1806 cnt_addr = MC_P_ECC_STATUS; 1892 cnt_addr = MC_P_ECC_STATUS_A;
1807 } 1893 }
1808 } else { 1894 } else {
1809 addr = MC_REG(MC_P_INT_CAUSE, 1); 1895 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
1810 cnt_addr = MC_REG(MC_P_ECC_STATUS, 1); 1896 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
1811 } 1897 }
1812 1898
1813 v = t4_read_reg(adapter, addr) & MEM_INT_MASK; 1899 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1814 if (v & PERR_INT_CAUSE) 1900 if (v & PERR_INT_CAUSE_F)
1815 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n", 1901 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1816 name[idx]); 1902 name[idx]);
1817 if (v & ECC_CE_INT_CAUSE) { 1903 if (v & ECC_CE_INT_CAUSE_F) {
1818 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr)); 1904 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
1819 1905
1820 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK); 1906 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
1821 if (printk_ratelimit()) 1907 if (printk_ratelimit())
1822 dev_warn(adapter->pdev_dev, 1908 dev_warn(adapter->pdev_dev,
1823 "%u %s correctable ECC data error%s\n", 1909 "%u %s correctable ECC data error%s\n",
1824 cnt, name[idx], cnt > 1 ? "s" : ""); 1910 cnt, name[idx], cnt > 1 ? "s" : "");
1825 } 1911 }
1826 if (v & ECC_UE_INT_CAUSE) 1912 if (v & ECC_UE_INT_CAUSE_F)
1827 dev_alert(adapter->pdev_dev, 1913 dev_alert(adapter->pdev_dev,
1828 "%s uncorrectable ECC data error\n", name[idx]); 1914 "%s uncorrectable ECC data error\n", name[idx]);
1829 1915
1830 t4_write_reg(adapter, addr, v); 1916 t4_write_reg(adapter, addr, v);
1831 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) 1917 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
1832 t4_fatal_err(adapter); 1918 t4_fatal_err(adapter);
1833} 1919}
1834 1920
@@ -1837,26 +1923,26 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
1837 */ 1923 */
1838static void ma_intr_handler(struct adapter *adap) 1924static void ma_intr_handler(struct adapter *adap)
1839{ 1925{
1840 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); 1926 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
1841 1927
1842 if (status & MEM_PERR_INT_CAUSE) { 1928 if (status & MEM_PERR_INT_CAUSE_F) {
1843 dev_alert(adap->pdev_dev, 1929 dev_alert(adap->pdev_dev,
1844 "MA parity error, parity status %#x\n", 1930 "MA parity error, parity status %#x\n",
1845 t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); 1931 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
1846 if (is_t5(adap->params.chip)) 1932 if (is_t5(adap->params.chip))
1847 dev_alert(adap->pdev_dev, 1933 dev_alert(adap->pdev_dev,
1848 "MA parity error, parity status %#x\n", 1934 "MA parity error, parity status %#x\n",
1849 t4_read_reg(adap, 1935 t4_read_reg(adap,
1850 MA_PARITY_ERROR_STATUS2)); 1936 MA_PARITY_ERROR_STATUS2_A));
1851 } 1937 }
1852 if (status & MEM_WRAP_INT_CAUSE) { 1938 if (status & MEM_WRAP_INT_CAUSE_F) {
1853 v = t4_read_reg(adap, MA_INT_WRAP_STATUS); 1939 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
1854 dev_alert(adap->pdev_dev, "MA address wrap-around error by " 1940 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1855 "client %u to address %#x\n", 1941 "client %u to address %#x\n",
1856 MEM_WRAP_CLIENT_NUM_GET(v), 1942 MEM_WRAP_CLIENT_NUM_G(v),
1857 MEM_WRAP_ADDRESS_GET(v) << 4); 1943 MEM_WRAP_ADDRESS_G(v) << 4);
1858 } 1944 }
1859 t4_write_reg(adap, MA_INT_CAUSE, status); 1945 t4_write_reg(adap, MA_INT_CAUSE_A, status);
1860 t4_fatal_err(adap); 1946 t4_fatal_err(adap);
1861} 1947}
1862 1948
@@ -1866,13 +1952,13 @@ static void ma_intr_handler(struct adapter *adap)
1866static void smb_intr_handler(struct adapter *adap) 1952static void smb_intr_handler(struct adapter *adap)
1867{ 1953{
1868 static const struct intr_info smb_intr_info[] = { 1954 static const struct intr_info smb_intr_info[] = {
1869 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 1955 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
1870 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 1956 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
1871 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 1957 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
1872 { 0 } 1958 { 0 }
1873 }; 1959 };
1874 1960
1875 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info)) 1961 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
1876 t4_fatal_err(adap); 1962 t4_fatal_err(adap);
1877} 1963}
1878 1964
@@ -1882,14 +1968,14 @@ static void smb_intr_handler(struct adapter *adap)
1882static void ncsi_intr_handler(struct adapter *adap) 1968static void ncsi_intr_handler(struct adapter *adap)
1883{ 1969{
1884 static const struct intr_info ncsi_intr_info[] = { 1970 static const struct intr_info ncsi_intr_info[] = {
1885 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 1971 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
1886 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 1972 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
1887 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 1973 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
1888 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, 1974 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
1889 { 0 } 1975 { 0 }
1890 }; 1976 };
1891 1977
1892 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info)) 1978 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
1893 t4_fatal_err(adap); 1979 t4_fatal_err(adap);
1894} 1980}
1895 1981
@@ -1901,23 +1987,23 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
1901 u32 v, int_cause_reg; 1987 u32 v, int_cause_reg;
1902 1988
1903 if (is_t4(adap->params.chip)) 1989 if (is_t4(adap->params.chip))
1904 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE); 1990 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
1905 else 1991 else
1906 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE); 1992 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
1907 1993
1908 v = t4_read_reg(adap, int_cause_reg); 1994 v = t4_read_reg(adap, int_cause_reg);
1909 1995
1910 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; 1996 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
1911 if (!v) 1997 if (!v)
1912 return; 1998 return;
1913 1999
1914 if (v & TXFIFO_PRTY_ERR) 2000 if (v & TXFIFO_PRTY_ERR_F)
1915 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", 2001 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1916 port); 2002 port);
1917 if (v & RXFIFO_PRTY_ERR) 2003 if (v & RXFIFO_PRTY_ERR_F)
1918 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", 2004 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1919 port); 2005 port);
1920 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v); 2006 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
1921 t4_fatal_err(adap); 2007 t4_fatal_err(adap);
1922} 2008}
1923 2009
@@ -1927,19 +2013,19 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
1927static void pl_intr_handler(struct adapter *adap) 2013static void pl_intr_handler(struct adapter *adap)
1928{ 2014{
1929 static const struct intr_info pl_intr_info[] = { 2015 static const struct intr_info pl_intr_info[] = {
1930 { FATALPERR, "T4 fatal parity error", -1, 1 }, 2016 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
1931 { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 2017 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
1932 { 0 } 2018 { 0 }
1933 }; 2019 };
1934 2020
1935 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info)) 2021 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
1936 t4_fatal_err(adap); 2022 t4_fatal_err(adap);
1937} 2023}
1938 2024
1939#define PF_INTR_MASK (PFSW) 2025#define PF_INTR_MASK (PFSW_F)
1940#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ 2026#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
1941 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ 2027 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
1942 CPL_SWITCH | SGE | ULP_TX) 2028 CPL_SWITCH_F | SGE_F | ULP_TX_F)
1943 2029
1944/** 2030/**
1945 * t4_slow_intr_handler - control path interrupt handler 2031 * t4_slow_intr_handler - control path interrupt handler
@@ -1951,60 +2037,60 @@ static void pl_intr_handler(struct adapter *adap)
1951 */ 2037 */
1952int t4_slow_intr_handler(struct adapter *adapter) 2038int t4_slow_intr_handler(struct adapter *adapter)
1953{ 2039{
1954 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE); 2040 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
1955 2041
1956 if (!(cause & GLBL_INTR_MASK)) 2042 if (!(cause & GLBL_INTR_MASK))
1957 return 0; 2043 return 0;
1958 if (cause & CIM) 2044 if (cause & CIM_F)
1959 cim_intr_handler(adapter); 2045 cim_intr_handler(adapter);
1960 if (cause & MPS) 2046 if (cause & MPS_F)
1961 mps_intr_handler(adapter); 2047 mps_intr_handler(adapter);
1962 if (cause & NCSI) 2048 if (cause & NCSI_F)
1963 ncsi_intr_handler(adapter); 2049 ncsi_intr_handler(adapter);
1964 if (cause & PL) 2050 if (cause & PL_F)
1965 pl_intr_handler(adapter); 2051 pl_intr_handler(adapter);
1966 if (cause & SMB) 2052 if (cause & SMB_F)
1967 smb_intr_handler(adapter); 2053 smb_intr_handler(adapter);
1968 if (cause & XGMAC0) 2054 if (cause & XGMAC0_F)
1969 xgmac_intr_handler(adapter, 0); 2055 xgmac_intr_handler(adapter, 0);
1970 if (cause & XGMAC1) 2056 if (cause & XGMAC1_F)
1971 xgmac_intr_handler(adapter, 1); 2057 xgmac_intr_handler(adapter, 1);
1972 if (cause & XGMAC_KR0) 2058 if (cause & XGMAC_KR0_F)
1973 xgmac_intr_handler(adapter, 2); 2059 xgmac_intr_handler(adapter, 2);
1974 if (cause & XGMAC_KR1) 2060 if (cause & XGMAC_KR1_F)
1975 xgmac_intr_handler(adapter, 3); 2061 xgmac_intr_handler(adapter, 3);
1976 if (cause & PCIE) 2062 if (cause & PCIE_F)
1977 pcie_intr_handler(adapter); 2063 pcie_intr_handler(adapter);
1978 if (cause & MC) 2064 if (cause & MC_F)
1979 mem_intr_handler(adapter, MEM_MC); 2065 mem_intr_handler(adapter, MEM_MC);
1980 if (!is_t4(adapter->params.chip) && (cause & MC1)) 2066 if (!is_t4(adapter->params.chip) && (cause & MC1_S))
1981 mem_intr_handler(adapter, MEM_MC1); 2067 mem_intr_handler(adapter, MEM_MC1);
1982 if (cause & EDC0) 2068 if (cause & EDC0_F)
1983 mem_intr_handler(adapter, MEM_EDC0); 2069 mem_intr_handler(adapter, MEM_EDC0);
1984 if (cause & EDC1) 2070 if (cause & EDC1_F)
1985 mem_intr_handler(adapter, MEM_EDC1); 2071 mem_intr_handler(adapter, MEM_EDC1);
1986 if (cause & LE) 2072 if (cause & LE_F)
1987 le_intr_handler(adapter); 2073 le_intr_handler(adapter);
1988 if (cause & TP) 2074 if (cause & TP_F)
1989 tp_intr_handler(adapter); 2075 tp_intr_handler(adapter);
1990 if (cause & MA) 2076 if (cause & MA_F)
1991 ma_intr_handler(adapter); 2077 ma_intr_handler(adapter);
1992 if (cause & PM_TX) 2078 if (cause & PM_TX_F)
1993 pmtx_intr_handler(adapter); 2079 pmtx_intr_handler(adapter);
1994 if (cause & PM_RX) 2080 if (cause & PM_RX_F)
1995 pmrx_intr_handler(adapter); 2081 pmrx_intr_handler(adapter);
1996 if (cause & ULP_RX) 2082 if (cause & ULP_RX_F)
1997 ulprx_intr_handler(adapter); 2083 ulprx_intr_handler(adapter);
1998 if (cause & CPL_SWITCH) 2084 if (cause & CPL_SWITCH_F)
1999 cplsw_intr_handler(adapter); 2085 cplsw_intr_handler(adapter);
2000 if (cause & SGE) 2086 if (cause & SGE_F)
2001 sge_intr_handler(adapter); 2087 sge_intr_handler(adapter);
2002 if (cause & ULP_TX) 2088 if (cause & ULP_TX_F)
2003 ulptx_intr_handler(adapter); 2089 ulptx_intr_handler(adapter);
2004 2090
2005 /* Clear the interrupts just processed for which we are the master. */ 2091 /* Clear the interrupts just processed for which we are the master. */
2006 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK); 2092 t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
2007 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ 2093 (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
2008 return 1; 2094 return 1;
2009} 2095}
2010 2096
@@ -2023,19 +2109,19 @@ int t4_slow_intr_handler(struct adapter *adapter)
2023 */ 2109 */
2024void t4_intr_enable(struct adapter *adapter) 2110void t4_intr_enable(struct adapter *adapter)
2025{ 2111{
2026 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); 2112 u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
2027 2113
2028 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE | 2114 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
2029 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 | 2115 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
2030 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 | 2116 ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
2031 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | 2117 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
2032 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | 2118 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
2033 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | 2119 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
2034 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | 2120 ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
2035 DBFIFO_HP_INT | DBFIFO_LP_INT | 2121 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
2036 EGRESS_SIZE_ERR); 2122 EGRESS_SIZE_ERR_F);
2037 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); 2123 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
2038 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); 2124 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
2039} 2125}
2040 2126
2041/** 2127/**
@@ -2048,10 +2134,10 @@ void t4_intr_enable(struct adapter *adapter)
2048 */ 2134 */
2049void t4_intr_disable(struct adapter *adapter) 2135void t4_intr_disable(struct adapter *adapter)
2050{ 2136{
2051 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); 2137 u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
2052 2138
2053 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0); 2139 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
2054 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0); 2140 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
2055} 2141}
2056 2142
2057/** 2143/**
@@ -2166,6 +2252,147 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2166 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); 2252 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2167} 2253}
2168 2254
2255/* Read an RSS table row */
2256static int rd_rss_row(struct adapter *adap, int row, u32 *val)
2257{
2258 t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
2259 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
2260 5, 0, val);
2261}
2262
2263/**
2264 * t4_read_rss - read the contents of the RSS mapping table
2265 * @adapter: the adapter
2266 * @map: holds the contents of the RSS mapping table
2267 *
2268 * Reads the contents of the RSS hash->queue mapping table.
2269 */
2270int t4_read_rss(struct adapter *adapter, u16 *map)
2271{
2272 u32 val;
2273 int i, ret;
2274
2275 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
2276 ret = rd_rss_row(adapter, i, &val);
2277 if (ret)
2278 return ret;
2279 *map++ = LKPTBLQUEUE0_G(val);
2280 *map++ = LKPTBLQUEUE1_G(val);
2281 }
2282 return 0;
2283}
2284
2285/**
2286 * t4_read_rss_key - read the global RSS key
2287 * @adap: the adapter
2288 * @key: 10-entry array holding the 320-bit RSS key
2289 *
2290 * Reads the global 320-bit RSS key.
2291 */
2292void t4_read_rss_key(struct adapter *adap, u32 *key)
2293{
2294 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
2295 TP_RSS_SECRET_KEY0_A);
2296}
2297
2298/**
2299 * t4_write_rss_key - program one of the RSS keys
2300 * @adap: the adapter
2301 * @key: 10-entry array holding the 320-bit RSS key
2302 * @idx: which RSS key to write
2303 *
2304 * Writes one of the RSS keys with the given 320-bit value. If @idx is
2305 * 0..15 the corresponding entry in the RSS key table is written,
2306 * otherwise the global RSS key is written.
2307 */
2308void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
2309{
2310 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
2311 TP_RSS_SECRET_KEY0_A);
2312 if (idx >= 0 && idx < 16)
2313 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
2314 KEYWRADDR_V(idx) | KEYWREN_F);
2315}
2316
2317/**
2318 * t4_read_rss_pf_config - read PF RSS Configuration Table
2319 * @adapter: the adapter
2320 * @index: the entry in the PF RSS table to read
2321 * @valp: where to store the returned value
2322 *
2323 * Reads the PF RSS Configuration Table at the specified index and returns
2324 * the value found there.
2325 */
2326void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
2327 u32 *valp)
2328{
2329 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2330 valp, 1, TP_RSS_PF0_CONFIG_A + index);
2331}
2332
2333/**
2334 * t4_read_rss_vf_config - read VF RSS Configuration Table
2335 * @adapter: the adapter
2336 * @index: the entry in the VF RSS table to read
2337 * @vfl: where to store the returned VFL
2338 * @vfh: where to store the returned VFH
2339 *
2340 * Reads the VF RSS Configuration Table at the specified index and returns
2341 * the (VFL, VFH) values found there.
2342 */
2343void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
2344 u32 *vfl, u32 *vfh)
2345{
2346 u32 vrt, mask, data;
2347
2348 mask = VFWRADDR_V(VFWRADDR_M);
2349 data = VFWRADDR_V(index);
2350
2351 /* Request that the index'th VF Table values be read into VFL/VFH.
2352 */
2353 vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
2354 vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
2355 vrt |= data | VFRDEN_F;
2356 t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
2357
2358 /* Grab the VFL/VFH values ...
2359 */
2360 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2361 vfl, 1, TP_RSS_VFL_CONFIG_A);
2362 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2363 vfh, 1, TP_RSS_VFH_CONFIG_A);
2364}
2365
2366/**
2367 * t4_read_rss_pf_map - read PF RSS Map
2368 * @adapter: the adapter
2369 *
2370 * Reads the PF RSS Map register and returns its value.
2371 */
2372u32 t4_read_rss_pf_map(struct adapter *adapter)
2373{
2374 u32 pfmap;
2375
2376 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2377 &pfmap, 1, TP_RSS_PF_MAP_A);
2378 return pfmap;
2379}
2380
2381/**
2382 * t4_read_rss_pf_mask - read PF RSS Mask
2383 * @adapter: the adapter
2384 *
2385 * Reads the PF RSS Mask register and returns its value.
2386 */
2387u32 t4_read_rss_pf_mask(struct adapter *adapter)
2388{
2389 u32 pfmask;
2390
2391 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2392 &pfmask, 1, TP_RSS_PF_MSK_A);
2393 return pfmask;
2394}
2395
2169/** 2396/**
2170 * t4_tp_get_tcp_stats - read TP's TCP MIB counters 2397 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2171 * @adap: the adapter 2398 * @adap: the adapter
@@ -2178,23 +2405,23 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2178void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 2405void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2179 struct tp_tcp_stats *v6) 2406 struct tp_tcp_stats *v6)
2180{ 2407{
2181 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1]; 2408 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
2182 2409
2183#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST) 2410#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
2184#define STAT(x) val[STAT_IDX(x)] 2411#define STAT(x) val[STAT_IDX(x)]
2185#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) 2412#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2186 2413
2187 if (v4) { 2414 if (v4) {
2188 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, 2415 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
2189 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST); 2416 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
2190 v4->tcpOutRsts = STAT(OUT_RST); 2417 v4->tcpOutRsts = STAT(OUT_RST);
2191 v4->tcpInSegs = STAT64(IN_SEG); 2418 v4->tcpInSegs = STAT64(IN_SEG);
2192 v4->tcpOutSegs = STAT64(OUT_SEG); 2419 v4->tcpOutSegs = STAT64(OUT_SEG);
2193 v4->tcpRetransSegs = STAT64(RXT_SEG); 2420 v4->tcpRetransSegs = STAT64(RXT_SEG);
2194 } 2421 }
2195 if (v6) { 2422 if (v6) {
2196 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, 2423 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
2197 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST); 2424 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
2198 v6->tcpOutRsts = STAT(OUT_RST); 2425 v6->tcpOutRsts = STAT(OUT_RST);
2199 v6->tcpInSegs = STAT64(IN_SEG); 2426 v6->tcpInSegs = STAT64(IN_SEG);
2200 v6->tcpOutSegs = STAT64(OUT_SEG); 2427 v6->tcpOutSegs = STAT64(OUT_SEG);
@@ -2219,16 +2446,37 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2219 int i; 2446 int i;
2220 2447
2221 for (i = 0; i < NMTUS; ++i) { 2448 for (i = 0; i < NMTUS; ++i) {
2222 t4_write_reg(adap, TP_MTU_TABLE, 2449 t4_write_reg(adap, TP_MTU_TABLE_A,
2223 MTUINDEX(0xff) | MTUVALUE(i)); 2450 MTUINDEX_V(0xff) | MTUVALUE_V(i));
2224 v = t4_read_reg(adap, TP_MTU_TABLE); 2451 v = t4_read_reg(adap, TP_MTU_TABLE_A);
2225 mtus[i] = MTUVALUE_GET(v); 2452 mtus[i] = MTUVALUE_G(v);
2226 if (mtu_log) 2453 if (mtu_log)
2227 mtu_log[i] = MTUWIDTH_GET(v); 2454 mtu_log[i] = MTUWIDTH_G(v);
2228 } 2455 }
2229} 2456}
2230 2457
2231/** 2458/**
2459 * t4_read_cong_tbl - reads the congestion control table
2460 * @adap: the adapter
2461 * @incr: where to store the alpha values
2462 *
2463 * Reads the additive increments programmed into the HW congestion
2464 * control table.
2465 */
2466void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
2467{
2468 unsigned int mtu, w;
2469
2470 for (mtu = 0; mtu < NMTUS; ++mtu)
2471 for (w = 0; w < NCCTRL_WIN; ++w) {
2472 t4_write_reg(adap, TP_CCTRL_TABLE_A,
2473 ROWINDEX_V(0xffff) | (mtu << 5) | w);
2474 incr[mtu][w] = (u16)t4_read_reg(adap,
2475 TP_CCTRL_TABLE_A) & 0x1fff;
2476 }
2477}
2478
2479/**
2232 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register 2480 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2233 * @adap: the adapter 2481 * @adap: the adapter
2234 * @addr: the indirect TP register address 2482 * @addr: the indirect TP register address
@@ -2240,9 +2488,9 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2240void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 2488void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2241 unsigned int mask, unsigned int val) 2489 unsigned int mask, unsigned int val)
2242{ 2490{
2243 t4_write_reg(adap, TP_PIO_ADDR, addr); 2491 t4_write_reg(adap, TP_PIO_ADDR_A, addr);
2244 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask; 2492 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
2245 t4_write_reg(adap, TP_PIO_DATA, val); 2493 t4_write_reg(adap, TP_PIO_DATA_A, val);
2246} 2494}
2247 2495
2248/** 2496/**
@@ -2321,8 +2569,8 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2321 2569
2322 if (!(mtu & ((1 << log2) >> 2))) /* round */ 2570 if (!(mtu & ((1 << log2) >> 2))) /* round */
2323 log2--; 2571 log2--;
2324 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) | 2572 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
2325 MTUWIDTH(log2) | MTUVALUE(mtu)); 2573 MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
2326 2574
2327 for (w = 0; w < NCCTRL_WIN; ++w) { 2575 for (w = 0; w < NCCTRL_WIN; ++w) {
2328 unsigned int inc; 2576 unsigned int inc;
@@ -2330,13 +2578,67 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2330 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], 2578 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2331 CC_MIN_INCR); 2579 CC_MIN_INCR);
2332 2580
2333 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) | 2581 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
2334 (w << 16) | (beta[w] << 13) | inc); 2582 (w << 16) | (beta[w] << 13) | inc);
2335 } 2583 }
2336 } 2584 }
2337} 2585}
2338 2586
2339/** 2587/**
2588 * t4_pmtx_get_stats - returns the HW stats from PMTX
2589 * @adap: the adapter
2590 * @cnt: where to store the count statistics
2591 * @cycles: where to store the cycle statistics
2592 *
2593 * Returns performance statistics from PMTX.
2594 */
2595void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
2596{
2597 int i;
2598 u32 data[2];
2599
2600 for (i = 0; i < PM_NSTATS; i++) {
2601 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
2602 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
2603 if (is_t4(adap->params.chip)) {
2604 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
2605 } else {
2606 t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
2607 PM_TX_DBG_DATA_A, data, 2,
2608 PM_TX_DBG_STAT_MSB_A);
2609 cycles[i] = (((u64)data[0] << 32) | data[1]);
2610 }
2611 }
2612}
2613
2614/**
2615 * t4_pmrx_get_stats - returns the HW stats from PMRX
2616 * @adap: the adapter
2617 * @cnt: where to store the count statistics
2618 * @cycles: where to store the cycle statistics
2619 *
2620 * Returns performance statistics from PMRX.
2621 */
2622void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
2623{
2624 int i;
2625 u32 data[2];
2626
2627 for (i = 0; i < PM_NSTATS; i++) {
2628 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
2629 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
2630 if (is_t4(adap->params.chip)) {
2631 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
2632 } else {
2633 t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
2634 PM_RX_DBG_DATA_A, data, 2,
2635 PM_RX_DBG_STAT_MSB_A);
2636 cycles[i] = (((u64)data[0] << 32) | data[1]);
2637 }
2638 }
2639}
2640
2641/**
2340 * get_mps_bg_map - return the buffer groups associated with a port 2642 * get_mps_bg_map - return the buffer groups associated with a port
2341 * @adap: the adapter 2643 * @adap: the adapter
2342 * @idx: the port index 2644 * @idx: the port index
@@ -2347,7 +2649,7 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2347 */ 2649 */
2348static unsigned int get_mps_bg_map(struct adapter *adap, int idx) 2650static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2349{ 2651{
2350 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL)); 2652 u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
2351 2653
2352 if (n == 0) 2654 if (n == 0)
2353 return idx == 0 ? 0xf : 0; 2655 return idx == 0 ? 0xf : 0;
@@ -2485,11 +2787,11 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2485 if (is_t4(adap->params.chip)) { 2787 if (is_t4(adap->params.chip)) {
2486 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); 2788 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2487 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); 2789 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2488 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); 2790 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
2489 } else { 2791 } else {
2490 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO); 2792 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2491 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI); 2793 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2492 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); 2794 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
2493 } 2795 }
2494 2796
2495 if (addr) { 2797 if (addr) {
@@ -2499,8 +2801,8 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2499 t4_write_reg(adap, mag_id_reg_h, 2801 t4_write_reg(adap, mag_id_reg_h,
2500 (addr[0] << 8) | addr[1]); 2802 (addr[0] << 8) | addr[1]);
2501 } 2803 }
2502 t4_set_reg_field(adap, port_cfg_reg, MAGICEN, 2804 t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
2503 addr ? MAGICEN : 0); 2805 addr ? MAGICEN_F : 0);
2504} 2806}
2505 2807
2506/** 2808/**
@@ -2525,20 +2827,21 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2525 u32 port_cfg_reg; 2827 u32 port_cfg_reg;
2526 2828
2527 if (is_t4(adap->params.chip)) 2829 if (is_t4(adap->params.chip))
2528 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); 2830 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
2529 else 2831 else
2530 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); 2832 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
2531 2833
2532 if (!enable) { 2834 if (!enable) {
2533 t4_set_reg_field(adap, port_cfg_reg, PATEN, 0); 2835 t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
2534 return 0; 2836 return 0;
2535 } 2837 }
2536 if (map > 0xff) 2838 if (map > 0xff)
2537 return -EINVAL; 2839 return -EINVAL;
2538 2840
2539#define EPIO_REG(name) \ 2841#define EPIO_REG(name) \
2540 (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ 2842 (is_t4(adap->params.chip) ? \
2541 T5_PORT_REG(port, MAC_PORT_EPIO_##name)) 2843 PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
2844 T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
2542 2845
2543 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); 2846 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2544 t4_write_reg(adap, EPIO_REG(DATA2), mask1); 2847 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
@@ -2550,21 +2853,21 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2550 2853
2551 /* write byte masks */ 2854 /* write byte masks */
2552 t4_write_reg(adap, EPIO_REG(DATA0), mask0); 2855 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2553 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR); 2856 t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
2554 t4_read_reg(adap, EPIO_REG(OP)); /* flush */ 2857 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2555 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY) 2858 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
2556 return -ETIMEDOUT; 2859 return -ETIMEDOUT;
2557 2860
2558 /* write CRC */ 2861 /* write CRC */
2559 t4_write_reg(adap, EPIO_REG(DATA0), crc); 2862 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2560 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR); 2863 t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
2561 t4_read_reg(adap, EPIO_REG(OP)); /* flush */ 2864 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2562 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY) 2865 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
2563 return -ETIMEDOUT; 2866 return -ETIMEDOUT;
2564 } 2867 }
2565#undef EPIO_REG 2868#undef EPIO_REG
2566 2869
2567 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN); 2870 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F);
2568 return 0; 2871 return 0;
2569} 2872}
2570 2873
@@ -2749,9 +3052,9 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2749 "IDMA_FL_SEND_COMPLETION_TO_IMSG", 3052 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2750 }; 3053 };
2751 static const u32 sge_regs[] = { 3054 static const u32 sge_regs[] = {
2752 SGE_DEBUG_DATA_LOW_INDEX_2, 3055 SGE_DEBUG_DATA_LOW_INDEX_2_A,
2753 SGE_DEBUG_DATA_LOW_INDEX_3, 3056 SGE_DEBUG_DATA_LOW_INDEX_3_A,
2754 SGE_DEBUG_DATA_HIGH_INDEX_10, 3057 SGE_DEBUG_DATA_HIGH_INDEX_10_A,
2755 }; 3058 };
2756 const char **sge_idma_decode; 3059 const char **sge_idma_decode;
2757 int sge_idma_decode_nstates; 3060 int sge_idma_decode_nstates;
@@ -2818,7 +3121,7 @@ retry:
2818 if (ret < 0) { 3121 if (ret < 0) {
2819 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) 3122 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2820 goto retry; 3123 goto retry;
2821 if (t4_read_reg(adap, MA_PCIE_FW) & PCIE_FW_ERR) 3124 if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
2822 t4_report_fw_error(adap); 3125 t4_report_fw_error(adap);
2823 return ret; 3126 return ret;
2824 } 3127 }
@@ -2868,8 +3171,8 @@ retry:
2868 * timeout ... and then retry if we haven't exhausted 3171 * timeout ... and then retry if we haven't exhausted
2869 * our retries ... 3172 * our retries ...
2870 */ 3173 */
2871 pcie_fw = t4_read_reg(adap, MA_PCIE_FW); 3174 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
2872 if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) { 3175 if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
2873 if (waiting <= 0) { 3176 if (waiting <= 0) {
2874 if (retries-- > 0) 3177 if (retries-- > 0)
2875 goto retry; 3178 goto retry;
@@ -2884,9 +3187,9 @@ retry:
2884 * report errors preferentially. 3187 * report errors preferentially.
2885 */ 3188 */
2886 if (state) { 3189 if (state) {
2887 if (pcie_fw & PCIE_FW_ERR) 3190 if (pcie_fw & PCIE_FW_ERR_F)
2888 *state = DEV_STATE_ERR; 3191 *state = DEV_STATE_ERR;
2889 else if (pcie_fw & PCIE_FW_INIT) 3192 else if (pcie_fw & PCIE_FW_INIT_F)
2890 *state = DEV_STATE_INIT; 3193 *state = DEV_STATE_INIT;
2891 } 3194 }
2892 3195
@@ -2896,7 +3199,7 @@ retry:
2896 * for our caller. 3199 * for our caller.
2897 */ 3200 */
2898 if (master_mbox == PCIE_FW_MASTER_M && 3201 if (master_mbox == PCIE_FW_MASTER_M &&
2899 (pcie_fw & PCIE_FW_MASTER_VLD)) 3202 (pcie_fw & PCIE_FW_MASTER_VLD_F))
2900 master_mbox = PCIE_FW_MASTER_G(pcie_fw); 3203 master_mbox = PCIE_FW_MASTER_G(pcie_fw);
2901 break; 3204 break;
2902 } 3205 }
@@ -2985,7 +3288,7 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2985 3288
2986 memset(&c, 0, sizeof(c)); 3289 memset(&c, 0, sizeof(c));
2987 INIT_CMD(c, RESET, WRITE); 3290 INIT_CMD(c, RESET, WRITE);
2988 c.val = htonl(PIORST | PIORSTMODE); 3291 c.val = htonl(PIORST_F | PIORSTMODE_F);
2989 c.halt_pkd = htonl(FW_RESET_CMD_HALT_F); 3292 c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
2990 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3293 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2991 } 3294 }
@@ -3004,8 +3307,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
3004 * rather than a RESET ... if it's new enough to understand that ... 3307 * rather than a RESET ... if it's new enough to understand that ...
3005 */ 3308 */
3006 if (ret == 0 || force) { 3309 if (ret == 0 || force) {
3007 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST); 3310 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
3008 t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 3311 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
3009 PCIE_FW_HALT_F); 3312 PCIE_FW_HALT_F);
3010 } 3313 }
3011 3314
@@ -3045,7 +3348,7 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3045 * doing it automatically, we need to clear the PCIE_FW.HALT 3348 * doing it automatically, we need to clear the PCIE_FW.HALT
3046 * bit. 3349 * bit.
3047 */ 3350 */
3048 t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 0); 3351 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
3049 3352
3050 /* 3353 /*
3051 * If we've been given a valid mailbox, first try to get the 3354 * If we've been given a valid mailbox, first try to get the
@@ -3055,21 +3358,21 @@ static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3055 * hitting the chip with a hammer. 3358 * hitting the chip with a hammer.
3056 */ 3359 */
3057 if (mbox <= PCIE_FW_MASTER_M) { 3360 if (mbox <= PCIE_FW_MASTER_M) {
3058 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); 3361 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
3059 msleep(100); 3362 msleep(100);
3060 if (t4_fw_reset(adap, mbox, 3363 if (t4_fw_reset(adap, mbox,
3061 PIORST | PIORSTMODE) == 0) 3364 PIORST_F | PIORSTMODE_F) == 0)
3062 return 0; 3365 return 0;
3063 } 3366 }
3064 3367
3065 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE); 3368 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
3066 msleep(2000); 3369 msleep(2000);
3067 } else { 3370 } else {
3068 int ms; 3371 int ms;
3069 3372
3070 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); 3373 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
3071 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { 3374 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3072 if (!(t4_read_reg(adap, PCIE_FW) & PCIE_FW_HALT_F)) 3375 if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
3073 return 0; 3376 return 0;
3074 msleep(100); 3377 msleep(100);
3075 ms += 100; 3378 ms += 100;
@@ -3148,22 +3451,23 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3148 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size; 3451 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3149 unsigned int fl_align_log = fls(fl_align) - 1; 3452 unsigned int fl_align_log = fls(fl_align) - 1;
3150 3453
3151 t4_write_reg(adap, SGE_HOST_PAGE_SIZE, 3454 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
3152 HOSTPAGESIZEPF0(sge_hps) | 3455 HOSTPAGESIZEPF0_V(sge_hps) |
3153 HOSTPAGESIZEPF1(sge_hps) | 3456 HOSTPAGESIZEPF1_V(sge_hps) |
3154 HOSTPAGESIZEPF2(sge_hps) | 3457 HOSTPAGESIZEPF2_V(sge_hps) |
3155 HOSTPAGESIZEPF3(sge_hps) | 3458 HOSTPAGESIZEPF3_V(sge_hps) |
3156 HOSTPAGESIZEPF4(sge_hps) | 3459 HOSTPAGESIZEPF4_V(sge_hps) |
3157 HOSTPAGESIZEPF5(sge_hps) | 3460 HOSTPAGESIZEPF5_V(sge_hps) |
3158 HOSTPAGESIZEPF6(sge_hps) | 3461 HOSTPAGESIZEPF6_V(sge_hps) |
3159 HOSTPAGESIZEPF7(sge_hps)); 3462 HOSTPAGESIZEPF7_V(sge_hps));
3160 3463
3161 if (is_t4(adap->params.chip)) { 3464 if (is_t4(adap->params.chip)) {
3162 t4_set_reg_field(adap, SGE_CONTROL, 3465 t4_set_reg_field(adap, SGE_CONTROL_A,
3163 INGPADBOUNDARY_MASK | 3466 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
3164 EGRSTATUSPAGESIZE_MASK, 3467 EGRSTATUSPAGESIZE_F,
3165 INGPADBOUNDARY(fl_align_log - 5) | 3468 INGPADBOUNDARY_V(fl_align_log -
3166 EGRSTATUSPAGESIZE(stat_len != 64)); 3469 INGPADBOUNDARY_SHIFT_X) |
3470 EGRSTATUSPAGESIZE_V(stat_len != 64));
3167 } else { 3471 } else {
3168 /* T5 introduced the separation of the Free List Padding and 3472 /* T5 introduced the separation of the Free List Padding and
3169 * Packing Boundaries. Thus, we can select a smaller Padding 3473 * Packing Boundaries. Thus, we can select a smaller Padding
@@ -3193,15 +3497,15 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3193 fl_align = 64; 3497 fl_align = 64;
3194 fl_align_log = 6; 3498 fl_align_log = 6;
3195 } 3499 }
3196 t4_set_reg_field(adap, SGE_CONTROL, 3500 t4_set_reg_field(adap, SGE_CONTROL_A,
3197 INGPADBOUNDARY_MASK | 3501 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
3198 EGRSTATUSPAGESIZE_MASK, 3502 EGRSTATUSPAGESIZE_F,
3199 INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) | 3503 INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
3200 EGRSTATUSPAGESIZE(stat_len != 64)); 3504 EGRSTATUSPAGESIZE_V(stat_len != 64));
3201 t4_set_reg_field(adap, SGE_CONTROL2_A, 3505 t4_set_reg_field(adap, SGE_CONTROL2_A,
3202 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), 3506 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
3203 INGPACKBOUNDARY_V(fl_align_log - 3507 INGPACKBOUNDARY_V(fl_align_log -
3204 INGPACKBOUNDARY_SHIFT_X)); 3508 INGPACKBOUNDARY_SHIFT_X));
3205 } 3509 }
3206 /* 3510 /*
3207 * Adjust various SGE Free List Host Buffer Sizes. 3511 * Adjust various SGE Free List Host Buffer Sizes.
@@ -3224,15 +3528,15 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3224 * Default Firmware Configuration File but we need to adjust it for 3528 * Default Firmware Configuration File but we need to adjust it for
3225 * this host's cache line size. 3529 * this host's cache line size.
3226 */ 3530 */
3227 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size); 3531 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
3228 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2, 3532 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
3229 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1) 3533 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
3230 & ~(fl_align-1)); 3534 & ~(fl_align-1));
3231 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3, 3535 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
3232 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1) 3536 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
3233 & ~(fl_align-1)); 3537 & ~(fl_align-1));
3234 3538
3235 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12)); 3539 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
3236 3540
3237 return 0; 3541 return 0;
3238} 3542}
@@ -3917,12 +4221,12 @@ int t4_wait_dev_ready(void __iomem *regs)
3917{ 4221{
3918 u32 whoami; 4222 u32 whoami;
3919 4223
3920 whoami = readl(regs + PL_WHOAMI); 4224 whoami = readl(regs + PL_WHOAMI_A);
3921 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS) 4225 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
3922 return 0; 4226 return 0;
3923 4227
3924 msleep(500); 4228 msleep(500);
3925 whoami = readl(regs + PL_WHOAMI); 4229 whoami = readl(regs + PL_WHOAMI_A);
3926 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO); 4230 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
3927} 4231}
3928 4232
@@ -3946,7 +4250,7 @@ static int get_flash_params(struct adapter *adap)
3946 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID); 4250 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3947 if (!ret) 4251 if (!ret)
3948 ret = sf1_read(adap, 3, 0, 1, &info); 4252 ret = sf1_read(adap, 3, 0, 1, &info);
3949 t4_write_reg(adap, SF_OP, 0); /* unlock SF */ 4253 t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
3950 if (ret) 4254 if (ret)
3951 return ret; 4255 return ret;
3952 4256
@@ -3969,7 +4273,7 @@ static int get_flash_params(struct adapter *adap)
3969 return -EINVAL; 4273 return -EINVAL;
3970 adap->params.sf_size = 1 << info; 4274 adap->params.sf_size = 1 << info;
3971 adap->params.sf_fw_start = 4275 adap->params.sf_fw_start =
3972 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK; 4276 t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
3973 4277
3974 if (adap->params.sf_size < FLASH_MIN_SIZE) 4278 if (adap->params.sf_size < FLASH_MIN_SIZE)
3975 dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n", 4279 dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
@@ -3993,7 +4297,7 @@ int t4_prep_adapter(struct adapter *adapter)
3993 u32 pl_rev; 4297 u32 pl_rev;
3994 4298
3995 get_pci_mode(adapter, &adapter->params.pci); 4299 get_pci_mode(adapter, &adapter->params.pci);
3996 pl_rev = G_REV(t4_read_reg(adapter, PL_REV)); 4300 pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
3997 4301
3998 ret = get_flash_params(adapter); 4302 ret = get_flash_params(adapter);
3999 if (ret < 0) { 4303 if (ret < 0) {
@@ -4019,6 +4323,7 @@ int t4_prep_adapter(struct adapter *adapter)
4019 return -EINVAL; 4323 return -EINVAL;
4020 } 4324 }
4021 4325
4326 adapter->params.cim_la_size = CIMLA_SIZE;
4022 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); 4327 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4023 4328
4024 /* 4329 /*
@@ -4133,7 +4438,7 @@ int t4_init_sge_params(struct adapter *adapter)
4133 4438
4134 /* Extract the SGE Page Size for our PF. 4439 /* Extract the SGE Page Size for our PF.
4135 */ 4440 */
4136 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE); 4441 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
4137 s_hps = (HOSTPAGESIZEPF0_S + 4442 s_hps = (HOSTPAGESIZEPF0_S +
4138 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn); 4443 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
4139 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M); 4444 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
@@ -4142,10 +4447,10 @@ int t4_init_sge_params(struct adapter *adapter)
4142 */ 4447 */
4143 s_qpp = (QUEUESPERPAGEPF0_S + 4448 s_qpp = (QUEUESPERPAGEPF0_S +
4144 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn); 4449 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
4145 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF); 4450 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
4146 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); 4451 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
4147 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF); 4452 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
4148 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK); 4453 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
4149 4454
4150 return 0; 4455 return 0;
4151} 4456}
@@ -4161,9 +4466,9 @@ int t4_init_tp_params(struct adapter *adap)
4161 int chan; 4466 int chan;
4162 u32 v; 4467 u32 v;
4163 4468
4164 v = t4_read_reg(adap, TP_TIMER_RESOLUTION); 4469 v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
4165 adap->params.tp.tre = TIMERRESOLUTION_GET(v); 4470 adap->params.tp.tre = TIMERRESOLUTION_G(v);
4166 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v); 4471 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
4167 4472
4168 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ 4473 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
4169 for (chan = 0; chan < NCHAN; chan++) 4474 for (chan = 0; chan < NCHAN; chan++)
@@ -4172,27 +4477,27 @@ int t4_init_tp_params(struct adapter *adap)
4172 /* Cache the adapter's Compressed Filter Mode and global Incress 4477 /* Cache the adapter's Compressed Filter Mode and global Incress
4173 * Configuration. 4478 * Configuration.
4174 */ 4479 */
4175 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, 4480 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4176 &adap->params.tp.vlan_pri_map, 1, 4481 &adap->params.tp.vlan_pri_map, 1,
4177 TP_VLAN_PRI_MAP); 4482 TP_VLAN_PRI_MAP_A);
4178 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, 4483 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4179 &adap->params.tp.ingress_config, 1, 4484 &adap->params.tp.ingress_config, 1,
4180 TP_INGRESS_CONFIG); 4485 TP_INGRESS_CONFIG_A);
4181 4486
4182 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field 4487 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
4183 * shift positions of several elements of the Compressed Filter Tuple 4488 * shift positions of several elements of the Compressed Filter Tuple
4184 * for this adapter which we need frequently ... 4489 * for this adapter which we need frequently ...
4185 */ 4490 */
4186 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN); 4491 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
4187 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); 4492 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
4188 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT); 4493 adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
4189 adap->params.tp.protocol_shift = t4_filter_field_shift(adap, 4494 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
4190 F_PROTOCOL); 4495 PROTOCOL_F);
4191 4496
4192 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID 4497 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
4193 * represents the presense of an Outer VLAN instead of a VNIC ID. 4498 * represents the presense of an Outer VLAN instead of a VNIC ID.
4194 */ 4499 */
4195 if ((adap->params.tp.ingress_config & F_VNIC) == 0) 4500 if ((adap->params.tp.ingress_config & VNIC_F) == 0)
4196 adap->params.tp.vnic_shift = -1; 4501 adap->params.tp.vnic_shift = -1;
4197 4502
4198 return 0; 4503 return 0;
@@ -4218,35 +4523,35 @@ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
4218 4523
4219 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { 4524 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
4220 switch (filter_mode & sel) { 4525 switch (filter_mode & sel) {
4221 case F_FCOE: 4526 case FCOE_F:
4222 field_shift += W_FT_FCOE; 4527 field_shift += FT_FCOE_W;
4223 break; 4528 break;
4224 case F_PORT: 4529 case PORT_F:
4225 field_shift += W_FT_PORT; 4530 field_shift += FT_PORT_W;
4226 break; 4531 break;
4227 case F_VNIC_ID: 4532 case VNIC_ID_F:
4228 field_shift += W_FT_VNIC_ID; 4533 field_shift += FT_VNIC_ID_W;
4229 break; 4534 break;
4230 case F_VLAN: 4535 case VLAN_F:
4231 field_shift += W_FT_VLAN; 4536 field_shift += FT_VLAN_W;
4232 break; 4537 break;
4233 case F_TOS: 4538 case TOS_F:
4234 field_shift += W_FT_TOS; 4539 field_shift += FT_TOS_W;
4235 break; 4540 break;
4236 case F_PROTOCOL: 4541 case PROTOCOL_F:
4237 field_shift += W_FT_PROTOCOL; 4542 field_shift += FT_PROTOCOL_W;
4238 break; 4543 break;
4239 case F_ETHERTYPE: 4544 case ETHERTYPE_F:
4240 field_shift += W_FT_ETHERTYPE; 4545 field_shift += FT_ETHERTYPE_W;
4241 break; 4546 break;
4242 case F_MACMATCH: 4547 case MACMATCH_F:
4243 field_shift += W_FT_MACMATCH; 4548 field_shift += FT_MACMATCH_W;
4244 break; 4549 break;
4245 case F_MPSHITTYPE: 4550 case MPSHITTYPE_F:
4246 field_shift += W_FT_MPSHITTYPE; 4551 field_shift += FT_MPSHITTYPE_W;
4247 break; 4552 break;
4248 case F_FRAGMENTATION: 4553 case FRAGMENTATION_F:
4249 field_shift += W_FT_FRAGMENTATION; 4554 field_shift += FT_FRAGMENTATION_W;
4250 break; 4555 break;
4251 } 4556 }
4252 } 4557 }
@@ -4311,3 +4616,289 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
4311 } 4616 }
4312 return 0; 4617 return 0;
4313} 4618}
4619
4620/**
4621 * t4_read_cimq_cfg - read CIM queue configuration
4622 * @adap: the adapter
4623 * @base: holds the queue base addresses in bytes
4624 * @size: holds the queue sizes in bytes
4625 * @thres: holds the queue full thresholds in bytes
4626 *
4627 * Returns the current configuration of the CIM queues, starting with
4628 * the IBQs, then the OBQs.
4629 */
4630void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
4631{
4632 unsigned int i, v;
4633 int cim_num_obq = is_t4(adap->params.chip) ?
4634 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4635
4636 for (i = 0; i < CIM_NUM_IBQ; i++) {
4637 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
4638 QUENUMSELECT_V(i));
4639 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
4640 /* value is in 256-byte units */
4641 *base++ = CIMQBASE_G(v) * 256;
4642 *size++ = CIMQSIZE_G(v) * 256;
4643 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
4644 }
4645 for (i = 0; i < cim_num_obq; i++) {
4646 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
4647 QUENUMSELECT_V(i));
4648 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
4649 /* value is in 256-byte units */
4650 *base++ = CIMQBASE_G(v) * 256;
4651 *size++ = CIMQSIZE_G(v) * 256;
4652 }
4653}
4654
4655/**
4656 * t4_read_cim_ibq - read the contents of a CIM inbound queue
4657 * @adap: the adapter
4658 * @qid: the queue index
4659 * @data: where to store the queue contents
4660 * @n: capacity of @data in 32-bit words
4661 *
4662 * Reads the contents of the selected CIM queue starting at address 0 up
4663 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
4664 * error and the number of 32-bit words actually read on success.
4665 */
4666int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
4667{
4668 int i, err, attempts;
4669 unsigned int addr;
4670 const unsigned int nwords = CIM_IBQ_SIZE * 4;
4671
4672 if (qid > 5 || (n & 3))
4673 return -EINVAL;
4674
4675 addr = qid * nwords;
4676 if (n > nwords)
4677 n = nwords;
4678
4679 /* It might take 3-10ms before the IBQ debug read access is allowed.
4680 * Wait for 1 Sec with a delay of 1 usec.
4681 */
4682 attempts = 1000000;
4683
4684 for (i = 0; i < n; i++, addr++) {
4685 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
4686 IBQDBGEN_F);
4687 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
4688 attempts, 1);
4689 if (err)
4690 return err;
4691 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
4692 }
4693 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
4694 return i;
4695}
4696
4697/**
4698 * t4_read_cim_obq - read the contents of a CIM outbound queue
4699 * @adap: the adapter
4700 * @qid: the queue index
4701 * @data: where to store the queue contents
4702 * @n: capacity of @data in 32-bit words
4703 *
4704 * Reads the contents of the selected CIM queue starting at address 0 up
4705 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
4706 * error and the number of 32-bit words actually read on success.
4707 */
4708int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
4709{
4710 int i, err;
4711 unsigned int addr, v, nwords;
4712 int cim_num_obq = is_t4(adap->params.chip) ?
4713 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4714
4715 if ((qid > (cim_num_obq - 1)) || (n & 3))
4716 return -EINVAL;
4717
4718 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
4719 QUENUMSELECT_V(qid));
4720 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
4721
4722 addr = CIMQBASE_G(v) * 64; /* muliple of 256 -> muliple of 4 */
4723 nwords = CIMQSIZE_G(v) * 64; /* same */
4724 if (n > nwords)
4725 n = nwords;
4726
4727 for (i = 0; i < n; i++, addr++) {
4728 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
4729 OBQDBGEN_F);
4730 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
4731 2, 1);
4732 if (err)
4733 return err;
4734 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
4735 }
4736 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
4737 return i;
4738}
4739
4740/**
4741 * t4_cim_read - read a block from CIM internal address space
4742 * @adap: the adapter
4743 * @addr: the start address within the CIM address space
4744 * @n: number of words to read
4745 * @valp: where to store the result
4746 *
4747 * Reads a block of 4-byte words from the CIM intenal address space.
4748 */
4749int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
4750 unsigned int *valp)
4751{
4752 int ret = 0;
4753
4754 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
4755 return -EBUSY;
4756
4757 for ( ; !ret && n--; addr += 4) {
4758 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
4759 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
4760 0, 5, 2);
4761 if (!ret)
4762 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
4763 }
4764 return ret;
4765}
4766
4767/**
4768 * t4_cim_write - write a block into CIM internal address space
4769 * @adap: the adapter
4770 * @addr: the start address within the CIM address space
4771 * @n: number of words to write
4772 * @valp: set of values to write
4773 *
4774 * Writes a block of 4-byte words into the CIM intenal address space.
4775 */
4776int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
4777 const unsigned int *valp)
4778{
4779 int ret = 0;
4780
4781 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
4782 return -EBUSY;
4783
4784 for ( ; !ret && n--; addr += 4) {
4785 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
4786 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
4787 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
4788 0, 5, 2);
4789 }
4790 return ret;
4791}
4792
4793static int t4_cim_write1(struct adapter *adap, unsigned int addr,
4794 unsigned int val)
4795{
4796 return t4_cim_write(adap, addr, 1, &val);
4797}
4798
4799/**
4800 * t4_cim_read_la - read CIM LA capture buffer
4801 * @adap: the adapter
4802 * @la_buf: where to store the LA data
4803 * @wrptr: the HW write pointer within the capture buffer
4804 *
4805 * Reads the contents of the CIM LA buffer with the most recent entry at
4806 * the end of the returned data and with the entry at @wrptr first.
4807 * We try to leave the LA in the running state we find it in.
4808 */
4809int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
4810{
4811 int i, ret;
4812 unsigned int cfg, val, idx;
4813
4814 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
4815 if (ret)
4816 return ret;
4817
4818 if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
4819 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
4820 if (ret)
4821 return ret;
4822 }
4823
4824 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
4825 if (ret)
4826 goto restart;
4827
4828 idx = UPDBGLAWRPTR_G(val);
4829 if (wrptr)
4830 *wrptr = idx;
4831
4832 for (i = 0; i < adap->params.cim_la_size; i++) {
4833 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
4834 UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
4835 if (ret)
4836 break;
4837 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
4838 if (ret)
4839 break;
4840 if (val & UPDBGLARDEN_F) {
4841 ret = -ETIMEDOUT;
4842 break;
4843 }
4844 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
4845 if (ret)
4846 break;
4847 idx = (idx + 1) & UPDBGLARDPTR_M;
4848 }
4849restart:
4850 if (cfg & UPDBGLAEN_F) {
4851 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
4852 cfg & ~UPDBGLARDEN_F);
4853 if (!ret)
4854 ret = r;
4855 }
4856 return ret;
4857}
4858
4859/**
4860 * t4_tp_read_la - read TP LA capture buffer
4861 * @adap: the adapter
4862 * @la_buf: where to store the LA data
4863 * @wrptr: the HW write pointer within the capture buffer
4864 *
4865 * Reads the contents of the TP LA buffer with the most recent entry at
4866 * the end of the returned data and with the entry at @wrptr first.
4867 * We leave the LA in the running state we find it in.
4868 */
4869void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
4870{
4871 bool last_incomplete;
4872 unsigned int i, cfg, val, idx;
4873
4874 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
4875 if (cfg & DBGLAENABLE_F) /* freeze LA */
4876 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
4877 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
4878
4879 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
4880 idx = DBGLAWPTR_G(val);
4881 last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
4882 if (last_incomplete)
4883 idx = (idx + 1) & DBGLARPTR_M;
4884 if (wrptr)
4885 *wrptr = idx;
4886
4887 val &= 0xffff;
4888 val &= ~DBGLARPTR_V(DBGLARPTR_M);
4889 val |= adap->params.tp.la_mask;
4890
4891 for (i = 0; i < TPLA_SIZE; i++) {
4892 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
4893 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
4894 idx = (idx + 1) & DBGLARPTR_M;
4895 }
4896
4897 /* Wipe out last entry if it isn't valid */
4898 if (last_incomplete)
4899 la_buf[TPLA_SIZE - 1] = ~0ULL;
4900
4901 if (cfg & DBGLAENABLE_F) /* restore running state */
4902 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
4903 cfg | adap->params.tp.la_mask);
4904}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index c19a90e7f7d1..380b15c0417a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -48,6 +48,7 @@ enum {
48 NMTUS = 16, /* size of MTU table */ 48 NMTUS = 16, /* size of MTU table */
49 NCCTRL_WIN = 32, /* # of congestion control windows */ 49 NCCTRL_WIN = 32, /* # of congestion control windows */
50 L2T_SIZE = 4096, /* # of L2T entries */ 50 L2T_SIZE = 4096, /* # of L2T entries */
51 PM_NSTATS = 5, /* # of PM stats */
51 MBOX_LEN = 64, /* mailbox size in bytes */ 52 MBOX_LEN = 64, /* mailbox size in bytes */
52 TRACE_LEN = 112, /* length of trace data and mask */ 53 TRACE_LEN = 112, /* length of trace data and mask */
53 FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ 54 FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
@@ -56,6 +57,17 @@ enum {
56}; 57};
57 58
58enum { 59enum {
60 CIM_NUM_IBQ = 6, /* # of CIM IBQs */
61 CIM_NUM_OBQ = 6, /* # of CIM OBQs */
62 CIM_NUM_OBQ_T5 = 8, /* # of CIM OBQs for T5 adapter */
63 CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */
64 CIM_IBQ_SIZE = 128, /* # of 128-bit words in a CIM IBQ */
65 CIM_OBQ_SIZE = 128, /* # of 128-bit words in a CIM OBQ */
66 TPLA_SIZE = 128, /* # of 64-bit words in TP LA */
67 ULPRX_LA_SIZE = 512, /* # of 256-bit words in ULP_RX LA */
68};
69
70enum {
59 SF_PAGE_SIZE = 256, /* serial flash page size */ 71 SF_PAGE_SIZE = 256, /* serial flash page size */
60 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ 72 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
61}; 73};
@@ -110,6 +122,18 @@ enum {
110 SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */ 122 SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
111}; 123};
112 124
125/* PCI-e memory window access */
126enum pcie_memwin {
127 MEMWIN_NIC = 0,
128 MEMWIN_RSVD1 = 1,
129 MEMWIN_RSVD2 = 2,
130 MEMWIN_RDMA = 3,
131 MEMWIN_RSVD4 = 4,
132 MEMWIN_FOISCSI = 5,
133 MEMWIN_CSIOSTOR = 6,
134 MEMWIN_RSVD7 = 7,
135};
136
113struct sge_qstat { /* data written to SGE queue status entries */ 137struct sge_qstat { /* data written to SGE queue status entries */
114 __be32 qid; 138 __be32 qid;
115 __be16 cidx; 139 __be16 cidx;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 0f89f68948ab..0fb975e258b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -124,6 +124,13 @@ enum CPL_error {
124}; 124};
125 125
126enum { 126enum {
127 CPL_CONN_POLICY_AUTO = 0,
128 CPL_CONN_POLICY_ASK = 1,
129 CPL_CONN_POLICY_FILTER = 2,
130 CPL_CONN_POLICY_DENY = 3
131};
132
133enum {
127 ULP_MODE_NONE = 0, 134 ULP_MODE_NONE = 0,
128 ULP_MODE_ISCSI = 2, 135 ULP_MODE_ISCSI = 2,
129 ULP_MODE_RDMA = 4, 136 ULP_MODE_RDMA = 4,
@@ -160,16 +167,28 @@ union opcode_tid {
160 u8 opcode; 167 u8 opcode;
161}; 168};
162 169
163#define CPL_OPCODE(x) ((x) << 24) 170#define CPL_OPCODE_S 24
164#define G_CPL_OPCODE(x) (((x) >> 24) & 0xFF) 171#define CPL_OPCODE_V(x) ((x) << CPL_OPCODE_S)
165#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid)) 172#define CPL_OPCODE_G(x) (((x) >> CPL_OPCODE_S) & 0xFF)
173#define TID_G(x) ((x) & 0xFFFFFF)
174
175/* tid is assumed to be 24-bits */
176#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE_V(opcode) | (tid))
177
166#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) 178#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
167#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF) 179
180/* extract the TID from a CPL command */
181#define GET_TID(cmd) (TID_G(be32_to_cpu(OPCODE_TID(cmd))))
168 182
169/* partitioning of TID fields that also carry a queue id */ 183/* partitioning of TID fields that also carry a queue id */
170#define GET_TID_TID(x) ((x) & 0x3fff) 184#define TID_TID_S 0
171#define GET_TID_QID(x) (((x) >> 14) & 0x3ff) 185#define TID_TID_M 0x3fff
172#define TID_QID(x) ((x) << 14) 186#define TID_TID_G(x) (((x) >> TID_TID_S) & TID_TID_M)
187
188#define TID_QID_S 14
189#define TID_QID_M 0x3ff
190#define TID_QID_V(x) ((x) << TID_QID_S)
191#define TID_QID_G(x) (((x) >> TID_QID_S) & TID_QID_M)
173 192
174struct rss_header { 193struct rss_header {
175 u8 opcode; 194 u8 opcode;
@@ -199,8 +218,8 @@ struct work_request_hdr {
199}; 218};
200 219
201/* wr_hi fields */ 220/* wr_hi fields */
202#define S_WR_OP 24 221#define WR_OP_S 24
203#define V_WR_OP(x) ((__u64)(x) << S_WR_OP) 222#define WR_OP_V(x) ((__u64)(x) << WR_OP_S)
204 223
205#define WR_HDR struct work_request_hdr wr 224#define WR_HDR struct work_request_hdr wr
206 225
@@ -270,17 +289,42 @@ struct cpl_pass_open_req {
270 __be32 local_ip; 289 __be32 local_ip;
271 __be32 peer_ip; 290 __be32 peer_ip;
272 __be64 opt0; 291 __be64 opt0;
273#define NO_CONG(x) ((x) << 4)
274#define DELACK(x) ((x) << 5)
275#define DSCP(x) ((x) << 22)
276#define TCAM_BYPASS(x) ((u64)(x) << 48)
277#define NAGLE(x) ((u64)(x) << 49)
278 __be64 opt1; 292 __be64 opt1;
279#define SYN_RSS_ENABLE (1 << 0)
280#define SYN_RSS_QUEUE(x) ((x) << 2)
281#define CONN_POLICY_ASK (1 << 22)
282}; 293};
283 294
295/* option 0 fields */
296#define NO_CONG_S 4
297#define NO_CONG_V(x) ((x) << NO_CONG_S)
298#define NO_CONG_F NO_CONG_V(1U)
299
300#define DELACK_S 5
301#define DELACK_V(x) ((x) << DELACK_S)
302#define DELACK_F DELACK_V(1U)
303
304#define DSCP_S 22
305#define DSCP_M 0x3F
306#define DSCP_V(x) ((x) << DSCP_S)
307#define DSCP_G(x) (((x) >> DSCP_S) & DSCP_M)
308
309#define TCAM_BYPASS_S 48
310#define TCAM_BYPASS_V(x) ((__u64)(x) << TCAM_BYPASS_S)
311#define TCAM_BYPASS_F TCAM_BYPASS_V(1ULL)
312
313#define NAGLE_S 49
314#define NAGLE_V(x) ((__u64)(x) << NAGLE_S)
315#define NAGLE_F NAGLE_V(1ULL)
316
317/* option 1 fields */
318#define SYN_RSS_ENABLE_S 0
319#define SYN_RSS_ENABLE_V(x) ((x) << SYN_RSS_ENABLE_S)
320#define SYN_RSS_ENABLE_F SYN_RSS_ENABLE_V(1U)
321
322#define SYN_RSS_QUEUE_S 2
323#define SYN_RSS_QUEUE_V(x) ((x) << SYN_RSS_QUEUE_S)
324
325#define CONN_POLICY_S 22
326#define CONN_POLICY_V(x) ((x) << CONN_POLICY_S)
327
284struct cpl_pass_open_req6 { 328struct cpl_pass_open_req6 {
285 WR_HDR; 329 WR_HDR;
286 union opcode_tid ot; 330 union opcode_tid ot;
@@ -304,16 +348,37 @@ struct cpl_pass_accept_rpl {
304 WR_HDR; 348 WR_HDR;
305 union opcode_tid ot; 349 union opcode_tid ot;
306 __be32 opt2; 350 __be32 opt2;
307#define RX_COALESCE_VALID(x) ((x) << 11)
308#define RX_COALESCE(x) ((x) << 12)
309#define PACE(x) ((x) << 16)
310#define TX_QUEUE(x) ((x) << 23)
311#define CCTRL_ECN(x) ((x) << 27)
312#define TSTAMPS_EN(x) ((x) << 29)
313#define SACK_EN(x) ((x) << 30)
314 __be64 opt0; 351 __be64 opt0;
315}; 352};
316 353
354/* option 2 fields */
355#define RX_COALESCE_VALID_S 11
356#define RX_COALESCE_VALID_V(x) ((x) << RX_COALESCE_VALID_S)
357#define RX_COALESCE_VALID_F RX_COALESCE_VALID_V(1U)
358
359#define RX_COALESCE_S 12
360#define RX_COALESCE_V(x) ((x) << RX_COALESCE_S)
361
362#define PACE_S 16
363#define PACE_V(x) ((x) << PACE_S)
364
365#define TX_QUEUE_S 23
366#define TX_QUEUE_M 0x7
367#define TX_QUEUE_V(x) ((x) << TX_QUEUE_S)
368#define TX_QUEUE_G(x) (((x) >> TX_QUEUE_S) & TX_QUEUE_M)
369
370#define CCTRL_ECN_S 27
371#define CCTRL_ECN_V(x) ((x) << CCTRL_ECN_S)
372#define CCTRL_ECN_F CCTRL_ECN_V(1U)
373
374#define TSTAMPS_EN_S 29
375#define TSTAMPS_EN_V(x) ((x) << TSTAMPS_EN_S)
376#define TSTAMPS_EN_F TSTAMPS_EN_V(1U)
377
378#define SACK_EN_S 30
379#define SACK_EN_V(x) ((x) << SACK_EN_S)
380#define SACK_EN_F SACK_EN_V(1U)
381
317struct cpl_t5_pass_accept_rpl { 382struct cpl_t5_pass_accept_rpl {
318 WR_HDR; 383 WR_HDR;
319 union opcode_tid ot; 384 union opcode_tid ot;
@@ -384,30 +449,61 @@ struct cpl_t5_act_open_req6 {
384struct cpl_act_open_rpl { 449struct cpl_act_open_rpl {
385 union opcode_tid ot; 450 union opcode_tid ot;
386 __be32 atid_status; 451 __be32 atid_status;
387#define GET_AOPEN_STATUS(x) ((x) & 0xff)
388#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff)
389}; 452};
390 453
454/* cpl_act_open_rpl.atid_status fields */
455#define AOPEN_STATUS_S 0
456#define AOPEN_STATUS_M 0xFF
457#define AOPEN_STATUS_G(x) (((x) >> AOPEN_STATUS_S) & AOPEN_STATUS_M)
458
459#define AOPEN_ATID_S 8
460#define AOPEN_ATID_M 0xFFFFFF
461#define AOPEN_ATID_G(x) (((x) >> AOPEN_ATID_S) & AOPEN_ATID_M)
462
391struct cpl_pass_establish { 463struct cpl_pass_establish {
392 union opcode_tid ot; 464 union opcode_tid ot;
393 __be32 rsvd; 465 __be32 rsvd;
394 __be32 tos_stid; 466 __be32 tos_stid;
395#define PASS_OPEN_TID(x) ((x) << 0)
396#define PASS_OPEN_TOS(x) ((x) << 24)
397#define GET_PASS_OPEN_TID(x) (((x) >> 0) & 0xFFFFFF)
398#define GET_POPEN_TID(x) ((x) & 0xffffff)
399#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
400 __be16 mac_idx; 467 __be16 mac_idx;
401 __be16 tcp_opt; 468 __be16 tcp_opt;
402#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
403#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1)
404#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
405#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
406#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
407 __be32 snd_isn; 469 __be32 snd_isn;
408 __be32 rcv_isn; 470 __be32 rcv_isn;
409}; 471};
410 472
473/* cpl_pass_establish.tos_stid fields */
474#define PASS_OPEN_TID_S 0
475#define PASS_OPEN_TID_M 0xFFFFFF
476#define PASS_OPEN_TID_V(x) ((x) << PASS_OPEN_TID_S)
477#define PASS_OPEN_TID_G(x) (((x) >> PASS_OPEN_TID_S) & PASS_OPEN_TID_M)
478
479#define PASS_OPEN_TOS_S 24
480#define PASS_OPEN_TOS_M 0xFF
481#define PASS_OPEN_TOS_V(x) ((x) << PASS_OPEN_TOS_S)
482#define PASS_OPEN_TOS_G(x) (((x) >> PASS_OPEN_TOS_S) & PASS_OPEN_TOS_M)
483
484/* cpl_pass_establish.tcp_opt fields (also applies to act_open_establish) */
485#define TCPOPT_WSCALE_OK_S 5
486#define TCPOPT_WSCALE_OK_M 0x1
487#define TCPOPT_WSCALE_OK_G(x) \
488 (((x) >> TCPOPT_WSCALE_OK_S) & TCPOPT_WSCALE_OK_M)
489
490#define TCPOPT_SACK_S 6
491#define TCPOPT_SACK_M 0x1
492#define TCPOPT_SACK_G(x) (((x) >> TCPOPT_SACK_S) & TCPOPT_SACK_M)
493
494#define TCPOPT_TSTAMP_S 7
495#define TCPOPT_TSTAMP_M 0x1
496#define TCPOPT_TSTAMP_G(x) (((x) >> TCPOPT_TSTAMP_S) & TCPOPT_TSTAMP_M)
497
498#define TCPOPT_SND_WSCALE_S 8
499#define TCPOPT_SND_WSCALE_M 0xF
500#define TCPOPT_SND_WSCALE_G(x) \
501 (((x) >> TCPOPT_SND_WSCALE_S) & TCPOPT_SND_WSCALE_M)
502
503#define TCPOPT_MSS_S 12
504#define TCPOPT_MSS_M 0xF
505#define TCPOPT_MSS_G(x) (((x) >> TCPOPT_MSS_S) & TCPOPT_MSS_M)
506
411struct cpl_act_establish { 507struct cpl_act_establish {
412 union opcode_tid ot; 508 union opcode_tid ot;
413 __be32 rsvd; 509 __be32 rsvd;
@@ -422,24 +518,39 @@ struct cpl_get_tcb {
422 WR_HDR; 518 WR_HDR;
423 union opcode_tid ot; 519 union opcode_tid ot;
424 __be16 reply_ctrl; 520 __be16 reply_ctrl;
425#define QUEUENO(x) ((x) << 0)
426#define REPLY_CHAN(x) ((x) << 14)
427#define NO_REPLY(x) ((x) << 15)
428 __be16 cookie; 521 __be16 cookie;
429}; 522};
430 523
524/* cpl_get_tcb.reply_ctrl fields */
525#define QUEUENO_S 0
526#define QUEUENO_V(x) ((x) << QUEUENO_S)
527
528#define REPLY_CHAN_S 14
529#define REPLY_CHAN_V(x) ((x) << REPLY_CHAN_S)
530#define REPLY_CHAN_F REPLY_CHAN_V(1U)
531
532#define NO_REPLY_S 15
533#define NO_REPLY_V(x) ((x) << NO_REPLY_S)
534#define NO_REPLY_F NO_REPLY_V(1U)
535
431struct cpl_set_tcb_field { 536struct cpl_set_tcb_field {
432 WR_HDR; 537 WR_HDR;
433 union opcode_tid ot; 538 union opcode_tid ot;
434 __be16 reply_ctrl; 539 __be16 reply_ctrl;
435 __be16 word_cookie; 540 __be16 word_cookie;
436#define TCB_WORD(x) ((x) << 0)
437#define TCB_COOKIE(x) ((x) << 5)
438#define GET_TCB_COOKIE(x) (((x) >> 5) & 7)
439 __be64 mask; 541 __be64 mask;
440 __be64 val; 542 __be64 val;
441}; 543};
442 544
545/* cpl_set_tcb_field.word_cookie fields */
546#define TCB_WORD_S 0
547#define TCB_WORD(x) ((x) << TCB_WORD_S)
548
549#define TCB_COOKIE_S 5
550#define TCB_COOKIE_M 0x7
551#define TCB_COOKIE_V(x) ((x) << TCB_COOKIE_S)
552#define TCB_COOKIE_G(x) (((x) >> TCB_COOKIE_S) & TCB_COOKIE_M)
553
443struct cpl_set_tcb_rpl { 554struct cpl_set_tcb_rpl {
444 union opcode_tid ot; 555 union opcode_tid ot;
445 __be16 rsvd; 556 __be16 rsvd;
@@ -466,10 +577,14 @@ struct cpl_close_listsvr_req {
466 WR_HDR; 577 WR_HDR;
467 union opcode_tid ot; 578 union opcode_tid ot;
468 __be16 reply_ctrl; 579 __be16 reply_ctrl;
469#define LISTSVR_IPV6(x) ((x) << 14)
470 __be16 rsvd; 580 __be16 rsvd;
471}; 581};
472 582
583/* additional cpl_close_listsvr_req.reply_ctrl field */
584#define LISTSVR_IPV6_S 14
585#define LISTSVR_IPV6_V(x) ((x) << LISTSVR_IPV6_S)
586#define LISTSVR_IPV6_F LISTSVR_IPV6_V(1U)
587
473struct cpl_close_listsvr_rpl { 588struct cpl_close_listsvr_rpl {
474 union opcode_tid ot; 589 union opcode_tid ot;
475 u8 rsvd[3]; 590 u8 rsvd[3];
@@ -565,6 +680,34 @@ struct cpl_tx_pkt_lso_core {
565 /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ 680 /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
566}; 681};
567 682
683/* cpl_tx_pkt_lso_core.lso_ctrl fields */
684#define LSO_TCPHDR_LEN_S 0
685#define LSO_TCPHDR_LEN_V(x) ((x) << LSO_TCPHDR_LEN_S)
686
687#define LSO_IPHDR_LEN_S 4
688#define LSO_IPHDR_LEN_V(x) ((x) << LSO_IPHDR_LEN_S)
689
690#define LSO_ETHHDR_LEN_S 16
691#define LSO_ETHHDR_LEN_V(x) ((x) << LSO_ETHHDR_LEN_S)
692
693#define LSO_IPV6_S 20
694#define LSO_IPV6_V(x) ((x) << LSO_IPV6_S)
695#define LSO_IPV6_F LSO_IPV6_V(1U)
696
697#define LSO_LAST_SLICE_S 22
698#define LSO_LAST_SLICE_V(x) ((x) << LSO_LAST_SLICE_S)
699#define LSO_LAST_SLICE_F LSO_LAST_SLICE_V(1U)
700
701#define LSO_FIRST_SLICE_S 23
702#define LSO_FIRST_SLICE_V(x) ((x) << LSO_FIRST_SLICE_S)
703#define LSO_FIRST_SLICE_F LSO_FIRST_SLICE_V(1U)
704
705#define LSO_OPCODE_S 24
706#define LSO_OPCODE_V(x) ((x) << LSO_OPCODE_S)
707
708#define LSO_T5_XFER_SIZE_S 0
709#define LSO_T5_XFER_SIZE_V(x) ((x) << LSO_T5_XFER_SIZE_S)
710
568struct cpl_tx_pkt_lso { 711struct cpl_tx_pkt_lso {
569 WR_HDR; 712 WR_HDR;
570 struct cpl_tx_pkt_lso_core c; 713 struct cpl_tx_pkt_lso_core c;
@@ -574,8 +717,6 @@ struct cpl_tx_pkt_lso {
574struct cpl_iscsi_hdr { 717struct cpl_iscsi_hdr {
575 union opcode_tid ot; 718 union opcode_tid ot;
576 __be16 pdu_len_ddp; 719 __be16 pdu_len_ddp;
577#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF)
578#define ISCSI_DDP (1 << 15)
579 __be16 len; 720 __be16 len;
580 __be32 seq; 721 __be32 seq;
581 __be16 urg; 722 __be16 urg;
@@ -583,6 +724,16 @@ struct cpl_iscsi_hdr {
583 u8 status; 724 u8 status;
584}; 725};
585 726
727/* cpl_iscsi_hdr.pdu_len_ddp fields */
728#define ISCSI_PDU_LEN_S 0
729#define ISCSI_PDU_LEN_M 0x7FFF
730#define ISCSI_PDU_LEN_V(x) ((x) << ISCSI_PDU_LEN_S)
731#define ISCSI_PDU_LEN_G(x) (((x) >> ISCSI_PDU_LEN_S) & ISCSI_PDU_LEN_M)
732
733#define ISCSI_DDP_S 15
734#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S)
735#define ISCSI_DDP_F ISCSI_DDP_V(1U)
736
586struct cpl_rx_data { 737struct cpl_rx_data {
587 union opcode_tid ot; 738 union opcode_tid ot;
588 __be16 rsvd; 739 __be16 rsvd;
@@ -639,49 +790,61 @@ struct cpl_rx_pkt {
639 __be16 vlan; 790 __be16 vlan;
640 __be16 len; 791 __be16 len;
641 __be32 l2info; 792 __be32 l2info;
642#define RXF_UDP (1 << 22)
643#define RXF_TCP (1 << 23)
644#define RXF_IP (1 << 24)
645#define RXF_IP6 (1 << 25)
646 __be16 hdr_len; 793 __be16 hdr_len;
647 __be16 err_vec; 794 __be16 err_vec;
648}; 795};
649 796
797#define RXF_UDP_S 22
798#define RXF_UDP_V(x) ((x) << RXF_UDP_S)
799#define RXF_UDP_F RXF_UDP_V(1U)
800
801#define RXF_TCP_S 23
802#define RXF_TCP_V(x) ((x) << RXF_TCP_S)
803#define RXF_TCP_F RXF_TCP_V(1U)
804
805#define RXF_IP_S 24
806#define RXF_IP_V(x) ((x) << RXF_IP_S)
807#define RXF_IP_F RXF_IP_V(1U)
808
809#define RXF_IP6_S 25
810#define RXF_IP6_V(x) ((x) << RXF_IP6_S)
811#define RXF_IP6_F RXF_IP6_V(1U)
812
650/* rx_pkt.l2info fields */ 813/* rx_pkt.l2info fields */
651#define S_RX_ETHHDR_LEN 0 814#define RX_ETHHDR_LEN_S 0
652#define M_RX_ETHHDR_LEN 0x1F 815#define RX_ETHHDR_LEN_M 0x1F
653#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN) 816#define RX_ETHHDR_LEN_V(x) ((x) << RX_ETHHDR_LEN_S)
654#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN) 817#define RX_ETHHDR_LEN_G(x) (((x) >> RX_ETHHDR_LEN_S) & RX_ETHHDR_LEN_M)
655 818
656#define S_RX_T5_ETHHDR_LEN 0 819#define RX_T5_ETHHDR_LEN_S 0
657#define M_RX_T5_ETHHDR_LEN 0x3F 820#define RX_T5_ETHHDR_LEN_M 0x3F
658#define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN) 821#define RX_T5_ETHHDR_LEN_V(x) ((x) << RX_T5_ETHHDR_LEN_S)
659#define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN) 822#define RX_T5_ETHHDR_LEN_G(x) (((x) >> RX_T5_ETHHDR_LEN_S) & RX_T5_ETHHDR_LEN_M)
660 823
661#define S_RX_MACIDX 8 824#define RX_MACIDX_S 8
662#define M_RX_MACIDX 0x1FF 825#define RX_MACIDX_M 0x1FF
663#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX) 826#define RX_MACIDX_V(x) ((x) << RX_MACIDX_S)
664#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX) 827#define RX_MACIDX_G(x) (((x) >> RX_MACIDX_S) & RX_MACIDX_M)
665 828
666#define S_RXF_SYN 21 829#define RXF_SYN_S 21
667#define V_RXF_SYN(x) ((x) << S_RXF_SYN) 830#define RXF_SYN_V(x) ((x) << RXF_SYN_S)
668#define F_RXF_SYN V_RXF_SYN(1U) 831#define RXF_SYN_F RXF_SYN_V(1U)
669 832
670#define S_RX_CHAN 28 833#define RX_CHAN_S 28
671#define M_RX_CHAN 0xF 834#define RX_CHAN_M 0xF
672#define V_RX_CHAN(x) ((x) << S_RX_CHAN) 835#define RX_CHAN_V(x) ((x) << RX_CHAN_S)
673#define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN) 836#define RX_CHAN_G(x) (((x) >> RX_CHAN_S) & RX_CHAN_M)
674 837
675/* rx_pkt.hdr_len fields */ 838/* rx_pkt.hdr_len fields */
676#define S_RX_TCPHDR_LEN 0 839#define RX_TCPHDR_LEN_S 0
677#define M_RX_TCPHDR_LEN 0x3F 840#define RX_TCPHDR_LEN_M 0x3F
678#define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN) 841#define RX_TCPHDR_LEN_V(x) ((x) << RX_TCPHDR_LEN_S)
679#define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN) 842#define RX_TCPHDR_LEN_G(x) (((x) >> RX_TCPHDR_LEN_S) & RX_TCPHDR_LEN_M)
680 843
681#define S_RX_IPHDR_LEN 6 844#define RX_IPHDR_LEN_S 6
682#define M_RX_IPHDR_LEN 0x3FF 845#define RX_IPHDR_LEN_M 0x3FF
683#define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN) 846#define RX_IPHDR_LEN_V(x) ((x) << RX_IPHDR_LEN_S)
684#define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN) 847#define RX_IPHDR_LEN_G(x) (((x) >> RX_IPHDR_LEN_S) & RX_IPHDR_LEN_M)
685 848
686struct cpl_trace_pkt { 849struct cpl_trace_pkt {
687 u8 opcode; 850 u8 opcode;
@@ -730,14 +893,22 @@ struct cpl_l2t_write_req {
730 WR_HDR; 893 WR_HDR;
731 union opcode_tid ot; 894 union opcode_tid ot;
732 __be16 params; 895 __be16 params;
733#define L2T_W_INFO(x) ((x) << 2)
734#define L2T_W_PORT(x) ((x) << 8)
735#define L2T_W_NOREPLY(x) ((x) << 15)
736 __be16 l2t_idx; 896 __be16 l2t_idx;
737 __be16 vlan; 897 __be16 vlan;
738 u8 dst_mac[6]; 898 u8 dst_mac[6];
739}; 899};
740 900
901/* cpl_l2t_write_req.params fields */
902#define L2T_W_INFO_S 2
903#define L2T_W_INFO_V(x) ((x) << L2T_W_INFO_S)
904
905#define L2T_W_PORT_S 8
906#define L2T_W_PORT_V(x) ((x) << L2T_W_PORT_S)
907
908#define L2T_W_NOREPLY_S 15
909#define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S)
910#define L2T_W_NOREPLY_F L2T_W_NOREPLY_V(1U)
911
741struct cpl_l2t_write_rpl { 912struct cpl_l2t_write_rpl {
742 union opcode_tid ot; 913 union opcode_tid ot;
743 u8 status; 914 u8 status;
@@ -752,11 +923,15 @@ struct cpl_rdma_terminate {
752 923
753struct cpl_sge_egr_update { 924struct cpl_sge_egr_update {
754 __be32 opcode_qid; 925 __be32 opcode_qid;
755#define EGR_QID(x) ((x) & 0x1FFFF)
756 __be16 cidx; 926 __be16 cidx;
757 __be16 pidx; 927 __be16 pidx;
758}; 928};
759 929
930/* cpl_sge_egr_update.ot fields */
931#define EGR_QID_S 0
932#define EGR_QID_M 0x1FFFF
933#define EGR_QID_G(x) (((x) >> EGR_QID_S) & EGR_QID_M)
934
760/* cpl_fw*.type values */ 935/* cpl_fw*.type values */
761enum { 936enum {
762 FW_TYPE_CMD_RPL = 0, 937 FW_TYPE_CMD_RPL = 0,
@@ -849,22 +1024,30 @@ struct ulptx_sge_pair {
849 1024
850struct ulptx_sgl { 1025struct ulptx_sgl {
851 __be32 cmd_nsge; 1026 __be32 cmd_nsge;
852#define ULPTX_NSGE(x) ((x) << 0)
853#define ULPTX_MORE (1U << 23)
854 __be32 len0; 1027 __be32 len0;
855 __be64 addr0; 1028 __be64 addr0;
856 struct ulptx_sge_pair sge[0]; 1029 struct ulptx_sge_pair sge[0];
857}; 1030};
858 1031
1032#define ULPTX_NSGE_S 0
1033#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
1034
1035#define ULPTX_MORE_S 23
1036#define ULPTX_MORE_V(x) ((x) << ULPTX_MORE_S)
1037#define ULPTX_MORE_F ULPTX_MORE_V(1U)
1038
859struct ulp_mem_io { 1039struct ulp_mem_io {
860 WR_HDR; 1040 WR_HDR;
861 __be32 cmd; 1041 __be32 cmd;
862 __be32 len16; /* command length */ 1042 __be32 len16; /* command length */
863 __be32 dlen; /* data length in 32-byte units */ 1043 __be32 dlen; /* data length in 32-byte units */
864 __be32 lock_addr; 1044 __be32 lock_addr;
865#define ULP_MEMIO_LOCK(x) ((x) << 31)
866}; 1045};
867 1046
1047#define ULP_MEMIO_LOCK_S 31
1048#define ULP_MEMIO_LOCK_V(x) ((x) << ULP_MEMIO_LOCK_S)
1049#define ULP_MEMIO_LOCK_F ULP_MEMIO_LOCK_V(1U)
1050
868/* additional ulp_mem_io.cmd fields */ 1051/* additional ulp_mem_io.cmd fields */
869#define ULP_MEMIO_ORDER_S 23 1052#define ULP_MEMIO_ORDER_S 23
870#define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S) 1053#define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S)
@@ -874,13 +1057,9 @@ struct ulp_mem_io {
874#define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S) 1057#define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S)
875#define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U) 1058#define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U)
876 1059
877#define S_T5_ULP_MEMIO_IMM 23 1060#define T5_ULP_MEMIO_ORDER_S 22
878#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM) 1061#define T5_ULP_MEMIO_ORDER_V(x) ((x) << T5_ULP_MEMIO_ORDER_S)
879#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U) 1062#define T5_ULP_MEMIO_ORDER_F T5_ULP_MEMIO_ORDER_V(1U)
880
881#define S_T5_ULP_MEMIO_ORDER 22
882#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
883#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U)
884 1063
885/* ulp_mem_io.lock_addr fields */ 1064/* ulp_mem_io.lock_addr fields */
886#define ULP_MEMIO_ADDR_S 0 1065#define ULP_MEMIO_ADDR_S 0
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 9e4f95a91fb4..ddfb5b846045 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -153,6 +153,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
153 CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */ 153 CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */
154 CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */ 154 CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */
155 CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */ 155 CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
156 CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */
156CH_PCI_DEVICE_ID_TABLE_DEFINE_END; 157CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
157 158
158#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */ 159#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index d7bd34ee65bd..231a725f6d5d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -63,460 +63,779 @@
63#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) 63#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
64#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) 64#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
65 65
66#define SGE_PF_KDOORBELL 0x0 66#define SGE_PF_KDOORBELL_A 0x0
67#define QID_MASK 0xffff8000U 67
68#define QID_SHIFT 15 68#define QID_S 15
69#define QID(x) ((x) << QID_SHIFT) 69#define QID_V(x) ((x) << QID_S)
70#define DBPRIO(x) ((x) << 14) 70
71#define DBTYPE(x) ((x) << 13) 71#define DBPRIO_S 14
72#define PIDX_MASK 0x00003fffU 72#define DBPRIO_V(x) ((x) << DBPRIO_S)
73#define PIDX_SHIFT 0 73#define DBPRIO_F DBPRIO_V(1U)
74#define PIDX(x) ((x) << PIDX_SHIFT) 74
75#define PIDX_SHIFT_T5 0 75#define PIDX_S 0
76#define PIDX_T5(x) ((x) << PIDX_SHIFT_T5) 76#define PIDX_V(x) ((x) << PIDX_S)
77 77
78 78#define SGE_VF_KDOORBELL_A 0x0
79#define SGE_TIMERREGS 6 79
80#define SGE_PF_GTS 0x4 80#define DBTYPE_S 13
81#define INGRESSQID_MASK 0xffff0000U 81#define DBTYPE_V(x) ((x) << DBTYPE_S)
82#define INGRESSQID_SHIFT 16 82#define DBTYPE_F DBTYPE_V(1U)
83#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT) 83
84#define TIMERREG_MASK 0x0000e000U 84#define PIDX_T5_S 0
85#define TIMERREG_SHIFT 13 85#define PIDX_T5_M 0x1fffU
86#define TIMERREG(x) ((x) << TIMERREG_SHIFT) 86#define PIDX_T5_V(x) ((x) << PIDX_T5_S)
87#define SEINTARM_MASK 0x00001000U 87#define PIDX_T5_G(x) (((x) >> PIDX_T5_S) & PIDX_T5_M)
88#define SEINTARM_SHIFT 12 88
89#define SEINTARM(x) ((x) << SEINTARM_SHIFT) 89#define SGE_PF_GTS_A 0x4
90#define CIDXINC_MASK 0x00000fffU 90
91#define CIDXINC_SHIFT 0 91#define INGRESSQID_S 16
92#define CIDXINC(x) ((x) << CIDXINC_SHIFT) 92#define INGRESSQID_V(x) ((x) << INGRESSQID_S)
93 93
94#define X_RXPKTCPLMODE_SPLIT 1 94#define TIMERREG_S 13
95#define X_INGPADBOUNDARY_SHIFT 5 95#define TIMERREG_V(x) ((x) << TIMERREG_S)
96 96
97#define SGE_CONTROL 0x1008 97#define SEINTARM_S 12
98#define SGE_CONTROL2_A 0x1124 98#define SEINTARM_V(x) ((x) << SEINTARM_S)
99#define DCASYSTYPE 0x00080000U 99
100#define RXPKTCPLMODE_MASK 0x00040000U 100#define CIDXINC_S 0
101#define RXPKTCPLMODE_SHIFT 18 101#define CIDXINC_M 0xfffU
102#define RXPKTCPLMODE(x) ((x) << RXPKTCPLMODE_SHIFT) 102#define CIDXINC_V(x) ((x) << CIDXINC_S)
103#define EGRSTATUSPAGESIZE_MASK 0x00020000U 103
104#define EGRSTATUSPAGESIZE_SHIFT 17 104#define SGE_CONTROL_A 0x1008
105#define EGRSTATUSPAGESIZE(x) ((x) << EGRSTATUSPAGESIZE_SHIFT) 105#define SGE_CONTROL2_A 0x1124
106#define PKTSHIFT_MASK 0x00001c00U 106
107#define PKTSHIFT_SHIFT 10 107#define RXPKTCPLMODE_S 18
108#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) 108#define RXPKTCPLMODE_V(x) ((x) << RXPKTCPLMODE_S)
109#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT) 109#define RXPKTCPLMODE_F RXPKTCPLMODE_V(1U)
110#define INGPCIEBOUNDARY_32B_X 0 110
111#define INGPCIEBOUNDARY_MASK 0x00000380U 111#define EGRSTATUSPAGESIZE_S 17
112#define INGPCIEBOUNDARY_SHIFT 7 112#define EGRSTATUSPAGESIZE_V(x) ((x) << EGRSTATUSPAGESIZE_S)
113#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) 113#define EGRSTATUSPAGESIZE_F EGRSTATUSPAGESIZE_V(1U)
114#define INGPADBOUNDARY_MASK 0x00000070U 114
115#define INGPADBOUNDARY_SHIFT 4 115#define PKTSHIFT_S 10
116#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) 116#define PKTSHIFT_M 0x7U
117#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \ 117#define PKTSHIFT_V(x) ((x) << PKTSHIFT_S)
118 >> INGPADBOUNDARY_SHIFT) 118#define PKTSHIFT_G(x) (((x) >> PKTSHIFT_S) & PKTSHIFT_M)
119#define INGPACKBOUNDARY_16B_X 0 119
120#define INGPACKBOUNDARY_SHIFT_X 5 120#define INGPCIEBOUNDARY_S 7
121#define INGPCIEBOUNDARY_V(x) ((x) << INGPCIEBOUNDARY_S)
122
123#define INGPADBOUNDARY_S 4
124#define INGPADBOUNDARY_M 0x7U
125#define INGPADBOUNDARY_V(x) ((x) << INGPADBOUNDARY_S)
126#define INGPADBOUNDARY_G(x) (((x) >> INGPADBOUNDARY_S) & INGPADBOUNDARY_M)
127
128#define EGRPCIEBOUNDARY_S 1
129#define EGRPCIEBOUNDARY_V(x) ((x) << EGRPCIEBOUNDARY_S)
121 130
122#define INGPACKBOUNDARY_S 16 131#define INGPACKBOUNDARY_S 16
123#define INGPACKBOUNDARY_M 0x7U 132#define INGPACKBOUNDARY_M 0x7U
124#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S) 133#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S)
125#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \ 134#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
126 & INGPACKBOUNDARY_M) 135 & INGPACKBOUNDARY_M)
127#define EGRPCIEBOUNDARY_MASK 0x0000000eU
128#define EGRPCIEBOUNDARY_SHIFT 1
129#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
130#define GLOBALENABLE 0x00000001U
131 136
132#define SGE_HOST_PAGE_SIZE 0x100c 137#define GLOBALENABLE_S 0
138#define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
139#define GLOBALENABLE_F GLOBALENABLE_V(1U)
140
141#define SGE_HOST_PAGE_SIZE_A 0x100c
142
143#define HOSTPAGESIZEPF7_S 28
144#define HOSTPAGESIZEPF7_M 0xfU
145#define HOSTPAGESIZEPF7_V(x) ((x) << HOSTPAGESIZEPF7_S)
146#define HOSTPAGESIZEPF7_G(x) (((x) >> HOSTPAGESIZEPF7_S) & HOSTPAGESIZEPF7_M)
147
148#define HOSTPAGESIZEPF6_S 24
149#define HOSTPAGESIZEPF6_M 0xfU
150#define HOSTPAGESIZEPF6_V(x) ((x) << HOSTPAGESIZEPF6_S)
151#define HOSTPAGESIZEPF6_G(x) (((x) >> HOSTPAGESIZEPF6_S) & HOSTPAGESIZEPF6_M)
152
153#define HOSTPAGESIZEPF5_S 20
154#define HOSTPAGESIZEPF5_M 0xfU
155#define HOSTPAGESIZEPF5_V(x) ((x) << HOSTPAGESIZEPF5_S)
156#define HOSTPAGESIZEPF5_G(x) (((x) >> HOSTPAGESIZEPF5_S) & HOSTPAGESIZEPF5_M)
157
158#define HOSTPAGESIZEPF4_S 16
159#define HOSTPAGESIZEPF4_M 0xfU
160#define HOSTPAGESIZEPF4_V(x) ((x) << HOSTPAGESIZEPF4_S)
161#define HOSTPAGESIZEPF4_G(x) (((x) >> HOSTPAGESIZEPF4_S) & HOSTPAGESIZEPF4_M)
162
163#define HOSTPAGESIZEPF3_S 12
164#define HOSTPAGESIZEPF3_M 0xfU
165#define HOSTPAGESIZEPF3_V(x) ((x) << HOSTPAGESIZEPF3_S)
166#define HOSTPAGESIZEPF3_G(x) (((x) >> HOSTPAGESIZEPF3_S) & HOSTPAGESIZEPF3_M)
167
168#define HOSTPAGESIZEPF2_S 8
169#define HOSTPAGESIZEPF2_M 0xfU
170#define HOSTPAGESIZEPF2_V(x) ((x) << HOSTPAGESIZEPF2_S)
171#define HOSTPAGESIZEPF2_G(x) (((x) >> HOSTPAGESIZEPF2_S) & HOSTPAGESIZEPF2_M)
172
173#define HOSTPAGESIZEPF1_S 4
174#define HOSTPAGESIZEPF1_M 0xfU
175#define HOSTPAGESIZEPF1_V(x) ((x) << HOSTPAGESIZEPF1_S)
176#define HOSTPAGESIZEPF1_G(x) (((x) >> HOSTPAGESIZEPF1_S) & HOSTPAGESIZEPF1_M)
177
178#define HOSTPAGESIZEPF0_S 0
179#define HOSTPAGESIZEPF0_M 0xfU
180#define HOSTPAGESIZEPF0_V(x) ((x) << HOSTPAGESIZEPF0_S)
181#define HOSTPAGESIZEPF0_G(x) (((x) >> HOSTPAGESIZEPF0_S) & HOSTPAGESIZEPF0_M)
182
183#define SGE_EGRESS_QUEUES_PER_PAGE_PF_A 0x1010
184#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014
133 185
134#define HOSTPAGESIZEPF7_MASK 0x0000000fU 186#define QUEUESPERPAGEPF1_S 4
135#define HOSTPAGESIZEPF7_SHIFT 28
136#define HOSTPAGESIZEPF7(x) ((x) << HOSTPAGESIZEPF7_SHIFT)
137 187
138#define HOSTPAGESIZEPF6_MASK 0x0000000fU 188#define QUEUESPERPAGEPF0_S 0
139#define HOSTPAGESIZEPF6_SHIFT 24 189#define QUEUESPERPAGEPF0_M 0xfU
140#define HOSTPAGESIZEPF6(x) ((x) << HOSTPAGESIZEPF6_SHIFT) 190#define QUEUESPERPAGEPF0_V(x) ((x) << QUEUESPERPAGEPF0_S)
191#define QUEUESPERPAGEPF0_G(x) (((x) >> QUEUESPERPAGEPF0_S) & QUEUESPERPAGEPF0_M)
141 192
142#define HOSTPAGESIZEPF5_MASK 0x0000000fU 193#define SGE_INT_CAUSE1_A 0x1024
143#define HOSTPAGESIZEPF5_SHIFT 20 194#define SGE_INT_CAUSE2_A 0x1030
144#define HOSTPAGESIZEPF5(x) ((x) << HOSTPAGESIZEPF5_SHIFT) 195#define SGE_INT_CAUSE3_A 0x103c
196
197#define ERR_FLM_DBP_S 31
198#define ERR_FLM_DBP_V(x) ((x) << ERR_FLM_DBP_S)
199#define ERR_FLM_DBP_F ERR_FLM_DBP_V(1U)
200
201#define ERR_FLM_IDMA1_S 30
202#define ERR_FLM_IDMA1_V(x) ((x) << ERR_FLM_IDMA1_S)
203#define ERR_FLM_IDMA1_F ERR_FLM_IDMA1_V(1U)
204
205#define ERR_FLM_IDMA0_S 29
206#define ERR_FLM_IDMA0_V(x) ((x) << ERR_FLM_IDMA0_S)
207#define ERR_FLM_IDMA0_F ERR_FLM_IDMA0_V(1U)
208
209#define ERR_FLM_HINT_S 28
210#define ERR_FLM_HINT_V(x) ((x) << ERR_FLM_HINT_S)
211#define ERR_FLM_HINT_F ERR_FLM_HINT_V(1U)
212
213#define ERR_PCIE_ERROR3_S 27
214#define ERR_PCIE_ERROR3_V(x) ((x) << ERR_PCIE_ERROR3_S)
215#define ERR_PCIE_ERROR3_F ERR_PCIE_ERROR3_V(1U)
216
217#define ERR_PCIE_ERROR2_S 26
218#define ERR_PCIE_ERROR2_V(x) ((x) << ERR_PCIE_ERROR2_S)
219#define ERR_PCIE_ERROR2_F ERR_PCIE_ERROR2_V(1U)
220
221#define ERR_PCIE_ERROR1_S 25
222#define ERR_PCIE_ERROR1_V(x) ((x) << ERR_PCIE_ERROR1_S)
223#define ERR_PCIE_ERROR1_F ERR_PCIE_ERROR1_V(1U)
224
225#define ERR_PCIE_ERROR0_S 24
226#define ERR_PCIE_ERROR0_V(x) ((x) << ERR_PCIE_ERROR0_S)
227#define ERR_PCIE_ERROR0_F ERR_PCIE_ERROR0_V(1U)
228
229#define ERR_CPL_EXCEED_IQE_SIZE_S 22
230#define ERR_CPL_EXCEED_IQE_SIZE_V(x) ((x) << ERR_CPL_EXCEED_IQE_SIZE_S)
231#define ERR_CPL_EXCEED_IQE_SIZE_F ERR_CPL_EXCEED_IQE_SIZE_V(1U)
232
233#define ERR_INVALID_CIDX_INC_S 21
234#define ERR_INVALID_CIDX_INC_V(x) ((x) << ERR_INVALID_CIDX_INC_S)
235#define ERR_INVALID_CIDX_INC_F ERR_INVALID_CIDX_INC_V(1U)
236
237#define ERR_CPL_OPCODE_0_S 19
238#define ERR_CPL_OPCODE_0_V(x) ((x) << ERR_CPL_OPCODE_0_S)
239#define ERR_CPL_OPCODE_0_F ERR_CPL_OPCODE_0_V(1U)
240
241#define ERR_DROPPED_DB_S 18
242#define ERR_DROPPED_DB_V(x) ((x) << ERR_DROPPED_DB_S)
243#define ERR_DROPPED_DB_F ERR_DROPPED_DB_V(1U)
244
245#define ERR_DATA_CPL_ON_HIGH_QID1_S 17
246#define ERR_DATA_CPL_ON_HIGH_QID1_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID1_S)
247#define ERR_DATA_CPL_ON_HIGH_QID1_F ERR_DATA_CPL_ON_HIGH_QID1_V(1U)
248
249#define ERR_DATA_CPL_ON_HIGH_QID0_S 16
250#define ERR_DATA_CPL_ON_HIGH_QID0_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID0_S)
251#define ERR_DATA_CPL_ON_HIGH_QID0_F ERR_DATA_CPL_ON_HIGH_QID0_V(1U)
252
253#define ERR_BAD_DB_PIDX3_S 15
254#define ERR_BAD_DB_PIDX3_V(x) ((x) << ERR_BAD_DB_PIDX3_S)
255#define ERR_BAD_DB_PIDX3_F ERR_BAD_DB_PIDX3_V(1U)
256
257#define ERR_BAD_DB_PIDX2_S 14
258#define ERR_BAD_DB_PIDX2_V(x) ((x) << ERR_BAD_DB_PIDX2_S)
259#define ERR_BAD_DB_PIDX2_F ERR_BAD_DB_PIDX2_V(1U)
260
261#define ERR_BAD_DB_PIDX1_S 13
262#define ERR_BAD_DB_PIDX1_V(x) ((x) << ERR_BAD_DB_PIDX1_S)
263#define ERR_BAD_DB_PIDX1_F ERR_BAD_DB_PIDX1_V(1U)
264
265#define ERR_BAD_DB_PIDX0_S 12
266#define ERR_BAD_DB_PIDX0_V(x) ((x) << ERR_BAD_DB_PIDX0_S)
267#define ERR_BAD_DB_PIDX0_F ERR_BAD_DB_PIDX0_V(1U)
268
269#define ERR_ING_CTXT_PRIO_S 10
270#define ERR_ING_CTXT_PRIO_V(x) ((x) << ERR_ING_CTXT_PRIO_S)
271#define ERR_ING_CTXT_PRIO_F ERR_ING_CTXT_PRIO_V(1U)
272
273#define ERR_EGR_CTXT_PRIO_S 9
274#define ERR_EGR_CTXT_PRIO_V(x) ((x) << ERR_EGR_CTXT_PRIO_S)
275#define ERR_EGR_CTXT_PRIO_F ERR_EGR_CTXT_PRIO_V(1U)
276
277#define DBFIFO_HP_INT_S 8
278#define DBFIFO_HP_INT_V(x) ((x) << DBFIFO_HP_INT_S)
279#define DBFIFO_HP_INT_F DBFIFO_HP_INT_V(1U)
280
281#define DBFIFO_LP_INT_S 7
282#define DBFIFO_LP_INT_V(x) ((x) << DBFIFO_LP_INT_S)
283#define DBFIFO_LP_INT_F DBFIFO_LP_INT_V(1U)
284
285#define INGRESS_SIZE_ERR_S 5
286#define INGRESS_SIZE_ERR_V(x) ((x) << INGRESS_SIZE_ERR_S)
287#define INGRESS_SIZE_ERR_F INGRESS_SIZE_ERR_V(1U)
288
289#define EGRESS_SIZE_ERR_S 4
290#define EGRESS_SIZE_ERR_V(x) ((x) << EGRESS_SIZE_ERR_S)
291#define EGRESS_SIZE_ERR_F EGRESS_SIZE_ERR_V(1U)
292
293#define SGE_INT_ENABLE3_A 0x1040
294#define SGE_FL_BUFFER_SIZE0_A 0x1044
295#define SGE_FL_BUFFER_SIZE1_A 0x1048
296#define SGE_FL_BUFFER_SIZE2_A 0x104c
297#define SGE_FL_BUFFER_SIZE3_A 0x1050
298#define SGE_FL_BUFFER_SIZE4_A 0x1054
299#define SGE_FL_BUFFER_SIZE5_A 0x1058
300#define SGE_FL_BUFFER_SIZE6_A 0x105c
301#define SGE_FL_BUFFER_SIZE7_A 0x1060
302#define SGE_FL_BUFFER_SIZE8_A 0x1064
303
304#define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
305
306#define THRESHOLD_0_S 24
307#define THRESHOLD_0_M 0x3fU
308#define THRESHOLD_0_V(x) ((x) << THRESHOLD_0_S)
309#define THRESHOLD_0_G(x) (((x) >> THRESHOLD_0_S) & THRESHOLD_0_M)
310
311#define THRESHOLD_1_S 16
312#define THRESHOLD_1_M 0x3fU
313#define THRESHOLD_1_V(x) ((x) << THRESHOLD_1_S)
314#define THRESHOLD_1_G(x) (((x) >> THRESHOLD_1_S) & THRESHOLD_1_M)
315
316#define THRESHOLD_2_S 8
317#define THRESHOLD_2_M 0x3fU
318#define THRESHOLD_2_V(x) ((x) << THRESHOLD_2_S)
319#define THRESHOLD_2_G(x) (((x) >> THRESHOLD_2_S) & THRESHOLD_2_M)
320
321#define THRESHOLD_3_S 0
322#define THRESHOLD_3_M 0x3fU
323#define THRESHOLD_3_V(x) ((x) << THRESHOLD_3_S)
324#define THRESHOLD_3_G(x) (((x) >> THRESHOLD_3_S) & THRESHOLD_3_M)
325
326#define SGE_CONM_CTRL_A 0x1094
327
328#define EGRTHRESHOLD_S 8
329#define EGRTHRESHOLD_M 0x3fU
330#define EGRTHRESHOLD_V(x) ((x) << EGRTHRESHOLD_S)
331#define EGRTHRESHOLD_G(x) (((x) >> EGRTHRESHOLD_S) & EGRTHRESHOLD_M)
332
333#define EGRTHRESHOLDPACKING_S 14
334#define EGRTHRESHOLDPACKING_M 0x3fU
335#define EGRTHRESHOLDPACKING_V(x) ((x) << EGRTHRESHOLDPACKING_S)
336#define EGRTHRESHOLDPACKING_G(x) \
337 (((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
338
339#define SGE_TIMESTAMP_LO_A 0x1098
340#define SGE_TIMESTAMP_HI_A 0x109c
341
342#define TSOP_S 28
343#define TSOP_M 0x3U
344#define TSOP_V(x) ((x) << TSOP_S)
345#define TSOP_G(x) (((x) >> TSOP_S) & TSOP_M)
346
347#define TSVAL_S 0
348#define TSVAL_M 0xfffffffU
349#define TSVAL_V(x) ((x) << TSVAL_S)
350#define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M)
351
352#define SGE_DBFIFO_STATUS_A 0x10a4
353
354#define HP_INT_THRESH_S 28
355#define HP_INT_THRESH_M 0xfU
356#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
357
358#define LP_INT_THRESH_S 12
359#define LP_INT_THRESH_M 0xfU
360#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S)
361
362#define SGE_DOORBELL_CONTROL_A 0x10a8
363
364#define NOCOALESCE_S 26
365#define NOCOALESCE_V(x) ((x) << NOCOALESCE_S)
366#define NOCOALESCE_F NOCOALESCE_V(1U)
367
368#define ENABLE_DROP_S 13
369#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S)
370#define ENABLE_DROP_F ENABLE_DROP_V(1U)
371
372#define SGE_TIMER_VALUE_0_AND_1_A 0x10b8
373
374#define TIMERVALUE0_S 16
375#define TIMERVALUE0_M 0xffffU
376#define TIMERVALUE0_V(x) ((x) << TIMERVALUE0_S)
377#define TIMERVALUE0_G(x) (((x) >> TIMERVALUE0_S) & TIMERVALUE0_M)
378
379#define TIMERVALUE1_S 0
380#define TIMERVALUE1_M 0xffffU
381#define TIMERVALUE1_V(x) ((x) << TIMERVALUE1_S)
382#define TIMERVALUE1_G(x) (((x) >> TIMERVALUE1_S) & TIMERVALUE1_M)
383
384#define SGE_TIMER_VALUE_2_AND_3_A 0x10bc
385
386#define TIMERVALUE2_S 16
387#define TIMERVALUE2_M 0xffffU
388#define TIMERVALUE2_V(x) ((x) << TIMERVALUE2_S)
389#define TIMERVALUE2_G(x) (((x) >> TIMERVALUE2_S) & TIMERVALUE2_M)
390
391#define TIMERVALUE3_S 0
392#define TIMERVALUE3_M 0xffffU
393#define TIMERVALUE3_V(x) ((x) << TIMERVALUE3_S)
394#define TIMERVALUE3_G(x) (((x) >> TIMERVALUE3_S) & TIMERVALUE3_M)
395
396#define SGE_TIMER_VALUE_4_AND_5_A 0x10c0
397
398#define TIMERVALUE4_S 16
399#define TIMERVALUE4_M 0xffffU
400#define TIMERVALUE4_V(x) ((x) << TIMERVALUE4_S)
401#define TIMERVALUE4_G(x) (((x) >> TIMERVALUE4_S) & TIMERVALUE4_M)
145 402
146#define HOSTPAGESIZEPF4_MASK 0x0000000fU 403#define TIMERVALUE5_S 0
147#define HOSTPAGESIZEPF4_SHIFT 16 404#define TIMERVALUE5_M 0xffffU
148#define HOSTPAGESIZEPF4(x) ((x) << HOSTPAGESIZEPF4_SHIFT) 405#define TIMERVALUE5_V(x) ((x) << TIMERVALUE5_S)
406#define TIMERVALUE5_G(x) (((x) >> TIMERVALUE5_S) & TIMERVALUE5_M)
149 407
150#define HOSTPAGESIZEPF3_MASK 0x0000000fU 408#define SGE_DEBUG_INDEX_A 0x10cc
151#define HOSTPAGESIZEPF3_SHIFT 12 409#define SGE_DEBUG_DATA_HIGH_A 0x10d0
152#define HOSTPAGESIZEPF3(x) ((x) << HOSTPAGESIZEPF3_SHIFT) 410#define SGE_DEBUG_DATA_LOW_A 0x10d4
153 411
154#define HOSTPAGESIZEPF2_MASK 0x0000000fU 412#define SGE_DEBUG_DATA_LOW_INDEX_2_A 0x12c8
155#define HOSTPAGESIZEPF2_SHIFT 8 413#define SGE_DEBUG_DATA_LOW_INDEX_3_A 0x12cc
156#define HOSTPAGESIZEPF2(x) ((x) << HOSTPAGESIZEPF2_SHIFT) 414#define SGE_DEBUG_DATA_HIGH_INDEX_10_A 0x12a8
157 415
158#define HOSTPAGESIZEPF1_M 0x0000000fU 416#define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4
159#define HOSTPAGESIZEPF1_S 4 417#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
160#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_S)
161 418
162#define HOSTPAGESIZEPF0_M 0x0000000fU 419#define HP_INT_THRESH_S 28
163#define HOSTPAGESIZEPF0_S 0 420#define HP_INT_THRESH_M 0xfU
164#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_S) 421#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
165 422
166#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 423#define HP_COUNT_S 16
167#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014 424#define HP_COUNT_M 0x7ffU
425#define HP_COUNT_G(x) (((x) >> HP_COUNT_S) & HP_COUNT_M)
168 426
169#define QUEUESPERPAGEPF1_S 4 427#define LP_INT_THRESH_S 12
428#define LP_INT_THRESH_M 0xfU
429#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S)
170 430
171#define QUEUESPERPAGEPF0_S 0 431#define LP_COUNT_S 0
172#define QUEUESPERPAGEPF0_MASK 0x0000000fU 432#define LP_COUNT_M 0x7ffU
173#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) 433#define LP_COUNT_G(x) (((x) >> LP_COUNT_S) & LP_COUNT_M)
174 434
175#define QUEUESPERPAGEPF0 0 435#define LP_INT_THRESH_T5_S 18
176#define QUEUESPERPAGEPF1 4 436#define LP_INT_THRESH_T5_M 0xfffU
437#define LP_INT_THRESH_T5_V(x) ((x) << LP_INT_THRESH_T5_S)
177 438
178/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues. 439#define LP_COUNT_T5_S 0
179 * The User Doorbells are each 128 bytes in length with a Simple Doorbell at 440#define LP_COUNT_T5_M 0x3ffffU
180 * offsets 8x and a Write Combining single 64-byte Egress Queue Unit 441#define LP_COUNT_T5_G(x) (((x) >> LP_COUNT_T5_S) & LP_COUNT_T5_M)
181 * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64. For Ingress Queues, 442
182 * we have a Going To Sleep register at offsets 8x+4. 443#define SGE_DOORBELL_CONTROL_A 0x10a8
183 * 444
184 * As noted above, we have many instances of the Simple Doorbell and Going To 445#define SGE_STAT_TOTAL_A 0x10e4
185 * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a 446#define SGE_STAT_MATCH_A 0x10e8
186 * non-64-byte aligned offset for the Simple Doorbell in order to attempt to 447#define SGE_STAT_CFG_A 0x10ec
187 * avoid buffering of the writes to the Simple Doorbell and we want to use a 448
188 * non-contiguous offset for the Going To Sleep writes in order to avoid 449#define STATSOURCE_T5_S 9
189 * possible combining between them. 450#define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
190 */ 451
191#define SGE_UDB_SIZE 128 452#define SGE_DBFIFO_STATUS2_A 0x1118
192#define SGE_UDB_KDOORBELL 8 453
193#define SGE_UDB_GTS 20 454#define HP_INT_THRESH_T5_S 10
194#define SGE_UDB_WCDOORBELL 64 455#define HP_INT_THRESH_T5_M 0xfU
195 456#define HP_INT_THRESH_T5_V(x) ((x) << HP_INT_THRESH_T5_S)
196#define SGE_INT_CAUSE1 0x1024 457
197#define SGE_INT_CAUSE2 0x1030 458#define HP_COUNT_T5_S 0
198#define SGE_INT_CAUSE3 0x103c 459#define HP_COUNT_T5_M 0x3ffU
199#define ERR_FLM_DBP 0x80000000U 460#define HP_COUNT_T5_G(x) (((x) >> HP_COUNT_T5_S) & HP_COUNT_T5_M)
200#define ERR_FLM_IDMA1 0x40000000U 461
201#define ERR_FLM_IDMA0 0x20000000U 462#define ENABLE_DROP_S 13
202#define ERR_FLM_HINT 0x10000000U 463#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S)
203#define ERR_PCIE_ERROR3 0x08000000U 464#define ENABLE_DROP_F ENABLE_DROP_V(1U)
204#define ERR_PCIE_ERROR2 0x04000000U 465
205#define ERR_PCIE_ERROR1 0x02000000U 466#define DROPPED_DB_S 0
206#define ERR_PCIE_ERROR0 0x01000000U 467#define DROPPED_DB_V(x) ((x) << DROPPED_DB_S)
207#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U 468#define DROPPED_DB_F DROPPED_DB_V(1U)
208#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U 469
209#define ERR_INVALID_CIDX_INC 0x00200000U 470#define SGE_CTXT_CMD_A 0x11fc
210#define ERR_ITP_TIME_PAUSED 0x00100000U 471#define SGE_DBQ_CTXT_BADDR_A 0x1084
211#define ERR_CPL_OPCODE_0 0x00080000U 472
212#define ERR_DROPPED_DB 0x00040000U 473/* registers for module PCIE */
213#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U 474#define PCIE_PF_CFG_A 0x40
214#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U 475
215#define ERR_BAD_DB_PIDX3 0x00008000U 476#define AIVEC_S 4
216#define ERR_BAD_DB_PIDX2 0x00004000U 477#define AIVEC_M 0x3ffU
217#define ERR_BAD_DB_PIDX1 0x00002000U 478#define AIVEC_V(x) ((x) << AIVEC_S)
218#define ERR_BAD_DB_PIDX0 0x00001000U 479
219#define ERR_ING_PCIE_CHAN 0x00000800U 480#define PCIE_PF_CLI_A 0x44
220#define ERR_ING_CTXT_PRIO 0x00000400U 481#define PCIE_INT_CAUSE_A 0x3004
221#define ERR_EGR_CTXT_PRIO 0x00000200U 482
222#define DBFIFO_HP_INT 0x00000100U 483#define UNXSPLCPLERR_S 29
223#define DBFIFO_LP_INT 0x00000080U 484#define UNXSPLCPLERR_V(x) ((x) << UNXSPLCPLERR_S)
224#define REG_ADDRESS_ERR 0x00000040U 485#define UNXSPLCPLERR_F UNXSPLCPLERR_V(1U)
225#define INGRESS_SIZE_ERR 0x00000020U 486
226#define EGRESS_SIZE_ERR 0x00000010U 487#define PCIEPINT_S 28
227#define ERR_INV_CTXT3 0x00000008U 488#define PCIEPINT_V(x) ((x) << PCIEPINT_S)
228#define ERR_INV_CTXT2 0x00000004U 489#define PCIEPINT_F PCIEPINT_V(1U)
229#define ERR_INV_CTXT1 0x00000002U 490
230#define ERR_INV_CTXT0 0x00000001U 491#define PCIESINT_S 27
231 492#define PCIESINT_V(x) ((x) << PCIESINT_S)
232#define SGE_INT_ENABLE3 0x1040 493#define PCIESINT_F PCIESINT_V(1U)
233#define SGE_FL_BUFFER_SIZE0 0x1044 494
234#define SGE_FL_BUFFER_SIZE1 0x1048 495#define RPLPERR_S 26
235#define SGE_FL_BUFFER_SIZE2 0x104c 496#define RPLPERR_V(x) ((x) << RPLPERR_S)
236#define SGE_FL_BUFFER_SIZE3 0x1050 497#define RPLPERR_F RPLPERR_V(1U)
237#define SGE_FL_BUFFER_SIZE4 0x1054 498
238#define SGE_FL_BUFFER_SIZE5 0x1058 499#define RXWRPERR_S 25
239#define SGE_FL_BUFFER_SIZE6 0x105c 500#define RXWRPERR_V(x) ((x) << RXWRPERR_S)
240#define SGE_FL_BUFFER_SIZE7 0x1060 501#define RXWRPERR_F RXWRPERR_V(1U)
241#define SGE_FL_BUFFER_SIZE8 0x1064 502
242 503#define RXCPLPERR_S 24
243#define SGE_INGRESS_RX_THRESHOLD 0x10a0 504#define RXCPLPERR_V(x) ((x) << RXCPLPERR_S)
244#define THRESHOLD_0_MASK 0x3f000000U 505#define RXCPLPERR_F RXCPLPERR_V(1U)
245#define THRESHOLD_0_SHIFT 24 506
246#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT) 507#define PIOTAGPERR_S 23
247#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT) 508#define PIOTAGPERR_V(x) ((x) << PIOTAGPERR_S)
248#define THRESHOLD_1_MASK 0x003f0000U 509#define PIOTAGPERR_F PIOTAGPERR_V(1U)
249#define THRESHOLD_1_SHIFT 16 510
250#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT) 511#define MATAGPERR_S 22
251#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT) 512#define MATAGPERR_V(x) ((x) << MATAGPERR_S)
252#define THRESHOLD_2_MASK 0x00003f00U 513#define MATAGPERR_F MATAGPERR_V(1U)
253#define THRESHOLD_2_SHIFT 8 514
254#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT) 515#define INTXCLRPERR_S 21
255#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT) 516#define INTXCLRPERR_V(x) ((x) << INTXCLRPERR_S)
256#define THRESHOLD_3_MASK 0x0000003fU 517#define INTXCLRPERR_F INTXCLRPERR_V(1U)
257#define THRESHOLD_3_SHIFT 0 518
258#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT) 519#define FIDPERR_S 20
259#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT) 520#define FIDPERR_V(x) ((x) << FIDPERR_S)
260 521#define FIDPERR_F FIDPERR_V(1U)
261#define SGE_CONM_CTRL 0x1094 522
262#define EGRTHRESHOLD_MASK 0x00003f00U 523#define CFGSNPPERR_S 19
263#define EGRTHRESHOLDshift 8 524#define CFGSNPPERR_V(x) ((x) << CFGSNPPERR_S)
264#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift) 525#define CFGSNPPERR_F CFGSNPPERR_V(1U)
265#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift) 526
266 527#define HRSPPERR_S 18
267#define EGRTHRESHOLDPACKING_MASK 0x3fU 528#define HRSPPERR_V(x) ((x) << HRSPPERR_S)
268#define EGRTHRESHOLDPACKING_SHIFT 14 529#define HRSPPERR_F HRSPPERR_V(1U)
269#define EGRTHRESHOLDPACKING(x) ((x) << EGRTHRESHOLDPACKING_SHIFT) 530
270#define EGRTHRESHOLDPACKING_GET(x) (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \ 531#define HREQPERR_S 17
271 EGRTHRESHOLDPACKING_MASK) 532#define HREQPERR_V(x) ((x) << HREQPERR_S)
272 533#define HREQPERR_F HREQPERR_V(1U)
273#define SGE_DBFIFO_STATUS 0x10a4 534
274#define HP_INT_THRESH_SHIFT 28 535#define HCNTPERR_S 16
275#define HP_INT_THRESH_MASK 0xfU 536#define HCNTPERR_V(x) ((x) << HCNTPERR_S)
276#define HP_INT_THRESH(x) ((x) << HP_INT_THRESH_SHIFT) 537#define HCNTPERR_F HCNTPERR_V(1U)
277#define LP_INT_THRESH_SHIFT 12 538
278#define LP_INT_THRESH_MASK 0xfU 539#define DRSPPERR_S 15
279#define LP_INT_THRESH(x) ((x) << LP_INT_THRESH_SHIFT) 540#define DRSPPERR_V(x) ((x) << DRSPPERR_S)
280 541#define DRSPPERR_F DRSPPERR_V(1U)
281#define SGE_DOORBELL_CONTROL 0x10a8 542
282#define ENABLE_DROP (1 << 13) 543#define DREQPERR_S 14
283 544#define DREQPERR_V(x) ((x) << DREQPERR_S)
284#define S_NOCOALESCE 26 545#define DREQPERR_F DREQPERR_V(1U)
285#define V_NOCOALESCE(x) ((x) << S_NOCOALESCE) 546
286#define F_NOCOALESCE V_NOCOALESCE(1U) 547#define DCNTPERR_S 13
287 548#define DCNTPERR_V(x) ((x) << DCNTPERR_S)
288#define SGE_TIMESTAMP_LO 0x1098 549#define DCNTPERR_F DCNTPERR_V(1U)
289#define SGE_TIMESTAMP_HI 0x109c 550
290#define S_TSVAL 0 551#define CRSPPERR_S 12
291#define M_TSVAL 0xfffffffU 552#define CRSPPERR_V(x) ((x) << CRSPPERR_S)
292#define GET_TSVAL(x) (((x) >> S_TSVAL) & M_TSVAL) 553#define CRSPPERR_F CRSPPERR_V(1U)
293 554
294#define SGE_TIMER_VALUE_0_AND_1 0x10b8 555#define CREQPERR_S 11
295#define TIMERVALUE0_MASK 0xffff0000U 556#define CREQPERR_V(x) ((x) << CREQPERR_S)
296#define TIMERVALUE0_SHIFT 16 557#define CREQPERR_F CREQPERR_V(1U)
297#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT) 558
298#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT) 559#define CCNTPERR_S 10
299#define TIMERVALUE1_MASK 0x0000ffffU 560#define CCNTPERR_V(x) ((x) << CCNTPERR_S)
300#define TIMERVALUE1_SHIFT 0 561#define CCNTPERR_F CCNTPERR_V(1U)
301#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT) 562
302#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT) 563#define TARTAGPERR_S 9
303 564#define TARTAGPERR_V(x) ((x) << TARTAGPERR_S)
304#define SGE_TIMER_VALUE_2_AND_3 0x10bc 565#define TARTAGPERR_F TARTAGPERR_V(1U)
305#define TIMERVALUE2_MASK 0xffff0000U 566
306#define TIMERVALUE2_SHIFT 16 567#define PIOREQPERR_S 8
307#define TIMERVALUE2(x) ((x) << TIMERVALUE2_SHIFT) 568#define PIOREQPERR_V(x) ((x) << PIOREQPERR_S)
308#define TIMERVALUE2_GET(x) (((x) & TIMERVALUE2_MASK) >> TIMERVALUE2_SHIFT) 569#define PIOREQPERR_F PIOREQPERR_V(1U)
309#define TIMERVALUE3_MASK 0x0000ffffU 570
310#define TIMERVALUE3_SHIFT 0 571#define PIOCPLPERR_S 7
311#define TIMERVALUE3(x) ((x) << TIMERVALUE3_SHIFT) 572#define PIOCPLPERR_V(x) ((x) << PIOCPLPERR_S)
312#define TIMERVALUE3_GET(x) (((x) & TIMERVALUE3_MASK) >> TIMERVALUE3_SHIFT) 573#define PIOCPLPERR_F PIOCPLPERR_V(1U)
313 574
314#define SGE_TIMER_VALUE_4_AND_5 0x10c0 575#define MSIXDIPERR_S 6
315#define TIMERVALUE4_MASK 0xffff0000U 576#define MSIXDIPERR_V(x) ((x) << MSIXDIPERR_S)
316#define TIMERVALUE4_SHIFT 16 577#define MSIXDIPERR_F MSIXDIPERR_V(1U)
317#define TIMERVALUE4(x) ((x) << TIMERVALUE4_SHIFT) 578
318#define TIMERVALUE4_GET(x) (((x) & TIMERVALUE4_MASK) >> TIMERVALUE4_SHIFT) 579#define MSIXDATAPERR_S 5
319#define TIMERVALUE5_MASK 0x0000ffffU 580#define MSIXDATAPERR_V(x) ((x) << MSIXDATAPERR_S)
320#define TIMERVALUE5_SHIFT 0 581#define MSIXDATAPERR_F MSIXDATAPERR_V(1U)
321#define TIMERVALUE5(x) ((x) << TIMERVALUE5_SHIFT) 582
322#define TIMERVALUE5_GET(x) (((x) & TIMERVALUE5_MASK) >> TIMERVALUE5_SHIFT) 583#define MSIXADDRHPERR_S 4
323 584#define MSIXADDRHPERR_V(x) ((x) << MSIXADDRHPERR_S)
324#define SGE_DEBUG_INDEX 0x10cc 585#define MSIXADDRHPERR_F MSIXADDRHPERR_V(1U)
325#define SGE_DEBUG_DATA_HIGH 0x10d0 586
326#define SGE_DEBUG_DATA_LOW 0x10d4 587#define MSIXADDRLPERR_S 3
327#define SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8 588#define MSIXADDRLPERR_V(x) ((x) << MSIXADDRLPERR_S)
328#define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc 589#define MSIXADDRLPERR_F MSIXADDRLPERR_V(1U)
329#define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8 590
330#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 591#define MSIDATAPERR_S 2
331#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8 592#define MSIDATAPERR_V(x) ((x) << MSIDATAPERR_S)
593#define MSIDATAPERR_F MSIDATAPERR_V(1U)
594
595#define MSIADDRHPERR_S 1
596#define MSIADDRHPERR_V(x) ((x) << MSIADDRHPERR_S)
597#define MSIADDRHPERR_F MSIADDRHPERR_V(1U)
598
599#define MSIADDRLPERR_S 0
600#define MSIADDRLPERR_V(x) ((x) << MSIADDRLPERR_S)
601#define MSIADDRLPERR_F MSIADDRLPERR_V(1U)
602
603#define READRSPERR_S 29
604#define READRSPERR_V(x) ((x) << READRSPERR_S)
605#define READRSPERR_F READRSPERR_V(1U)
606
607#define TRGT1GRPPERR_S 28
608#define TRGT1GRPPERR_V(x) ((x) << TRGT1GRPPERR_S)
609#define TRGT1GRPPERR_F TRGT1GRPPERR_V(1U)
610
611#define IPSOTPERR_S 27
612#define IPSOTPERR_V(x) ((x) << IPSOTPERR_S)
613#define IPSOTPERR_F IPSOTPERR_V(1U)
614
615#define IPRETRYPERR_S 26
616#define IPRETRYPERR_V(x) ((x) << IPRETRYPERR_S)
617#define IPRETRYPERR_F IPRETRYPERR_V(1U)
618
619#define IPRXDATAGRPPERR_S 25
620#define IPRXDATAGRPPERR_V(x) ((x) << IPRXDATAGRPPERR_S)
621#define IPRXDATAGRPPERR_F IPRXDATAGRPPERR_V(1U)
622
623#define IPRXHDRGRPPERR_S 24
624#define IPRXHDRGRPPERR_V(x) ((x) << IPRXHDRGRPPERR_S)
625#define IPRXHDRGRPPERR_F IPRXHDRGRPPERR_V(1U)
626
627#define MAGRPPERR_S 22
628#define MAGRPPERR_V(x) ((x) << MAGRPPERR_S)
629#define MAGRPPERR_F MAGRPPERR_V(1U)
630
631#define VFIDPERR_S 21
632#define VFIDPERR_V(x) ((x) << VFIDPERR_S)
633#define VFIDPERR_F VFIDPERR_V(1U)
634
635#define HREQWRPERR_S 16
636#define HREQWRPERR_V(x) ((x) << HREQWRPERR_S)
637#define HREQWRPERR_F HREQWRPERR_V(1U)
638
639#define DREQWRPERR_S 13
640#define DREQWRPERR_V(x) ((x) << DREQWRPERR_S)
641#define DREQWRPERR_F DREQWRPERR_V(1U)
642
643#define CREQRDPERR_S 11
644#define CREQRDPERR_V(x) ((x) << CREQRDPERR_S)
645#define CREQRDPERR_F CREQRDPERR_V(1U)
646
647#define MSTTAGQPERR_S 10
648#define MSTTAGQPERR_V(x) ((x) << MSTTAGQPERR_S)
649#define MSTTAGQPERR_F MSTTAGQPERR_V(1U)
650
651#define PIOREQGRPPERR_S 8
652#define PIOREQGRPPERR_V(x) ((x) << PIOREQGRPPERR_S)
653#define PIOREQGRPPERR_F PIOREQGRPPERR_V(1U)
654
655#define PIOCPLGRPPERR_S 7
656#define PIOCPLGRPPERR_V(x) ((x) << PIOCPLGRPPERR_S)
657#define PIOCPLGRPPERR_F PIOCPLGRPPERR_V(1U)
658
659#define MSIXSTIPERR_S 2
660#define MSIXSTIPERR_V(x) ((x) << MSIXSTIPERR_S)
661#define MSIXSTIPERR_F MSIXSTIPERR_V(1U)
662
663#define MSTTIMEOUTPERR_S 1
664#define MSTTIMEOUTPERR_V(x) ((x) << MSTTIMEOUTPERR_S)
665#define MSTTIMEOUTPERR_F MSTTIMEOUTPERR_V(1U)
666
667#define MSTGRPPERR_S 0
668#define MSTGRPPERR_V(x) ((x) << MSTGRPPERR_S)
669#define MSTGRPPERR_F MSTGRPPERR_V(1U)
670
671#define PCIE_NONFAT_ERR_A 0x3010
672#define PCIE_CFG_SPACE_REQ_A 0x3060
673#define PCIE_CFG_SPACE_DATA_A 0x3064
674#define PCIE_MEM_ACCESS_BASE_WIN_A 0x3068
675
676#define PCIEOFST_S 10
677#define PCIEOFST_M 0x3fffffU
678#define PCIEOFST_G(x) (((x) >> PCIEOFST_S) & PCIEOFST_M)
679
680#define BIR_S 8
681#define BIR_M 0x3U
682#define BIR_V(x) ((x) << BIR_S)
683#define BIR_G(x) (((x) >> BIR_S) & BIR_M)
684
685#define WINDOW_S 0
686#define WINDOW_M 0xffU
687#define WINDOW_V(x) ((x) << WINDOW_S)
688#define WINDOW_G(x) (((x) >> WINDOW_S) & WINDOW_M)
689
690#define PCIE_MEM_ACCESS_OFFSET_A 0x306c
691
692#define ENABLE_S 30
693#define ENABLE_V(x) ((x) << ENABLE_S)
694#define ENABLE_F ENABLE_V(1U)
695
696#define LOCALCFG_S 28
697#define LOCALCFG_V(x) ((x) << LOCALCFG_S)
698#define LOCALCFG_F LOCALCFG_V(1U)
699
700#define FUNCTION_S 12
701#define FUNCTION_V(x) ((x) << FUNCTION_S)
702
703#define REGISTER_S 0
704#define REGISTER_V(x) ((x) << REGISTER_S)
705
706#define PFNUM_S 0
707#define PFNUM_V(x) ((x) << PFNUM_S)
708
709#define PCIE_FW_A 0x30b8
710
711#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908
712
713#define RNPP_S 31
714#define RNPP_V(x) ((x) << RNPP_S)
715#define RNPP_F RNPP_V(1U)
716
717#define RPCP_S 29
718#define RPCP_V(x) ((x) << RPCP_S)
719#define RPCP_F RPCP_V(1U)
720
721#define RCIP_S 27
722#define RCIP_V(x) ((x) << RCIP_S)
723#define RCIP_F RCIP_V(1U)
724
725#define RCCP_S 26
726#define RCCP_V(x) ((x) << RCCP_S)
727#define RCCP_F RCCP_V(1U)
728
729#define RFTP_S 23
730#define RFTP_V(x) ((x) << RFTP_S)
731#define RFTP_F RFTP_V(1U)
732
733#define PTRP_S 20
734#define PTRP_V(x) ((x) << PTRP_S)
735#define PTRP_F PTRP_V(1U)
332 736
333#define S_HP_INT_THRESH 28 737#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A 0x59a4
334#define M_HP_INT_THRESH 0xfU
335#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
336#define S_LP_INT_THRESH_T5 18
337#define V_LP_INT_THRESH_T5(x) ((x) << S_LP_INT_THRESH_T5)
338#define M_LP_COUNT_T5 0x3ffffU
339#define G_LP_COUNT_T5(x) (((x) >> S_LP_COUNT) & M_LP_COUNT_T5)
340#define M_HP_COUNT 0x7ffU
341#define S_HP_COUNT 16
342#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
343#define S_LP_INT_THRESH 12
344#define M_LP_INT_THRESH 0xfU
345#define M_LP_INT_THRESH_T5 0xfffU
346#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
347#define M_LP_COUNT 0x7ffU
348#define S_LP_COUNT 0
349#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
350#define A_SGE_DBFIFO_STATUS 0x10a4
351
352#define SGE_STAT_TOTAL 0x10e4
353#define SGE_STAT_MATCH 0x10e8
354
355#define SGE_STAT_CFG 0x10ec
356#define S_STATSOURCE_T5 9
357#define STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
358
359#define SGE_DBFIFO_STATUS2 0x1118
360#define M_HP_COUNT_T5 0x3ffU
361#define G_HP_COUNT_T5(x) ((x) & M_HP_COUNT_T5)
362#define S_HP_INT_THRESH_T5 10
363#define M_HP_INT_THRESH_T5 0xfU
364#define V_HP_INT_THRESH_T5(x) ((x) << S_HP_INT_THRESH_T5)
365
366#define S_ENABLE_DROP 13
367#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
368#define F_ENABLE_DROP V_ENABLE_DROP(1U)
369#define S_DROPPED_DB 0
370#define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
371#define F_DROPPED_DB V_DROPPED_DB(1U)
372#define A_SGE_DOORBELL_CONTROL 0x10a8
373
374#define A_SGE_CTXT_CMD 0x11fc
375#define A_SGE_DBQ_CTXT_BADDR 0x1084
376
377#define PCIE_PF_CFG 0x40
378#define AIVEC(x) ((x) << 4)
379#define AIVEC_MASK 0x3ffU
380
381#define PCIE_PF_CLI 0x44
382#define PCIE_INT_CAUSE 0x3004
383#define UNXSPLCPLERR 0x20000000U
384#define PCIEPINT 0x10000000U
385#define PCIESINT 0x08000000U
386#define RPLPERR 0x04000000U
387#define RXWRPERR 0x02000000U
388#define RXCPLPERR 0x01000000U
389#define PIOTAGPERR 0x00800000U
390#define MATAGPERR 0x00400000U
391#define INTXCLRPERR 0x00200000U
392#define FIDPERR 0x00100000U
393#define CFGSNPPERR 0x00080000U
394#define HRSPPERR 0x00040000U
395#define HREQPERR 0x00020000U
396#define HCNTPERR 0x00010000U
397#define DRSPPERR 0x00008000U
398#define DREQPERR 0x00004000U
399#define DCNTPERR 0x00002000U
400#define CRSPPERR 0x00001000U
401#define CREQPERR 0x00000800U
402#define CCNTPERR 0x00000400U
403#define TARTAGPERR 0x00000200U
404#define PIOREQPERR 0x00000100U
405#define PIOCPLPERR 0x00000080U
406#define MSIXDIPERR 0x00000040U
407#define MSIXDATAPERR 0x00000020U
408#define MSIXADDRHPERR 0x00000010U
409#define MSIXADDRLPERR 0x00000008U
410#define MSIDATAPERR 0x00000004U
411#define MSIADDRHPERR 0x00000002U
412#define MSIADDRLPERR 0x00000001U
413
414#define READRSPERR 0x20000000U
415#define TRGT1GRPPERR 0x10000000U
416#define IPSOTPERR 0x08000000U
417#define IPRXDATAGRPPERR 0x02000000U
418#define IPRXHDRGRPPERR 0x01000000U
419#define MAGRPPERR 0x00400000U
420#define VFIDPERR 0x00200000U
421#define HREQWRPERR 0x00010000U
422#define DREQWRPERR 0x00002000U
423#define MSTTAGQPERR 0x00000400U
424#define PIOREQGRPPERR 0x00000100U
425#define PIOCPLGRPPERR 0x00000080U
426#define MSIXSTIPERR 0x00000004U
427#define MSTTIMEOUTPERR 0x00000002U
428#define MSTGRPPERR 0x00000001U
429
430#define PCIE_NONFAT_ERR 0x3010
431#define PCIE_CFG_SPACE_REQ 0x3060
432#define PCIE_CFG_SPACE_DATA 0x3064
433#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
434#define S_PCIEOFST 10
435#define M_PCIEOFST 0x3fffffU
436#define GET_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST)
437#define PCIEOFST_MASK 0xfffffc00U
438#define BIR_MASK 0x00000300U
439#define BIR_SHIFT 8
440#define BIR(x) ((x) << BIR_SHIFT)
441#define WINDOW_MASK 0x000000ffU
442#define WINDOW_SHIFT 0
443#define WINDOW(x) ((x) << WINDOW_SHIFT)
444#define GET_WINDOW(x) (((x) >> WINDOW_SHIFT) & WINDOW_MASK)
445#define PCIE_MEM_ACCESS_OFFSET 0x306c
446#define ENABLE (1U << 30)
447#define FUNCTION(x) ((x) << 12)
448#define F_LOCALCFG (1U << 28)
449
450#define S_PFNUM 0
451#define V_PFNUM(x) ((x) << S_PFNUM)
452
453#define PCIE_FW 0x30b8
454#define PCIE_FW_ERR 0x80000000U
455#define PCIE_FW_INIT 0x40000000U
456#define PCIE_FW_HALT 0x20000000U
457#define PCIE_FW_MASTER_VLD 0x00008000U
458#define PCIE_FW_MASTER(x) ((x) << 12)
459#define PCIE_FW_MASTER_MASK 0x7
460#define PCIE_FW_MASTER_GET(x) (((x) >> 12) & PCIE_FW_MASTER_MASK)
461
462#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
463#define RNPP 0x80000000U
464#define RPCP 0x20000000U
465#define RCIP 0x08000000U
466#define RCCP 0x04000000U
467#define RFTP 0x00800000U
468#define PTRP 0x00100000U
469
470#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4
471#define TPCP 0x40000000U
472#define TNPP 0x20000000U
473#define TFTP 0x10000000U
474#define TCAP 0x08000000U
475#define TCIP 0x04000000U
476#define RCAP 0x02000000U
477#define PLUP 0x00800000U
478#define PLDN 0x00400000U
479#define OTDD 0x00200000U
480#define GTRP 0x00100000U
481#define RDPE 0x00040000U
482#define TDCE 0x00020000U
483#define TDUE 0x00010000U
484
485#define MC_INT_CAUSE 0x7518
486#define MC_P_INT_CAUSE 0x41318
487#define ECC_UE_INT_CAUSE 0x00000004U
488#define ECC_CE_INT_CAUSE 0x00000002U
489#define PERR_INT_CAUSE 0x00000001U
490
491#define MC_ECC_STATUS 0x751c
492#define MC_P_ECC_STATUS 0x4131c
493#define ECC_CECNT_MASK 0xffff0000U
494#define ECC_CECNT_SHIFT 16
495#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT)
496#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT)
497#define ECC_UECNT_MASK 0x0000ffffU
498#define ECC_UECNT_SHIFT 0
499#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT)
500#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT)
501
502#define MC_BIST_CMD 0x7600
503#define START_BIST 0x80000000U
504#define BIST_CMD_GAP_MASK 0x0000ff00U
505#define BIST_CMD_GAP_SHIFT 8
506#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT)
507#define BIST_OPCODE_MASK 0x00000003U
508#define BIST_OPCODE_SHIFT 0
509#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT)
510
511#define MC_BIST_CMD_ADDR 0x7604
512#define MC_BIST_CMD_LEN 0x7608
513#define MC_BIST_DATA_PATTERN 0x760c
514#define BIST_DATA_TYPE_MASK 0x0000000fU
515#define BIST_DATA_TYPE_SHIFT 0
516#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT)
517
518#define MC_BIST_STATUS_RDATA 0x7688
519 738
739#define TPCP_S 30
740#define TPCP_V(x) ((x) << TPCP_S)
741#define TPCP_F TPCP_V(1U)
742
743#define TNPP_S 29
744#define TNPP_V(x) ((x) << TNPP_S)
745#define TNPP_F TNPP_V(1U)
746
747#define TFTP_S 28
748#define TFTP_V(x) ((x) << TFTP_S)
749#define TFTP_F TFTP_V(1U)
750
751#define TCAP_S 27
752#define TCAP_V(x) ((x) << TCAP_S)
753#define TCAP_F TCAP_V(1U)
754
755#define TCIP_S 26
756#define TCIP_V(x) ((x) << TCIP_S)
757#define TCIP_F TCIP_V(1U)
758
759#define RCAP_S 25
760#define RCAP_V(x) ((x) << RCAP_S)
761#define RCAP_F RCAP_V(1U)
762
763#define PLUP_S 23
764#define PLUP_V(x) ((x) << PLUP_S)
765#define PLUP_F PLUP_V(1U)
766
767#define PLDN_S 22
768#define PLDN_V(x) ((x) << PLDN_S)
769#define PLDN_F PLDN_V(1U)
770
771#define OTDD_S 21
772#define OTDD_V(x) ((x) << OTDD_S)
773#define OTDD_F OTDD_V(1U)
774
775#define GTRP_S 20
776#define GTRP_V(x) ((x) << GTRP_S)
777#define GTRP_F GTRP_V(1U)
778
779#define RDPE_S 18
780#define RDPE_V(x) ((x) << RDPE_S)
781#define RDPE_F RDPE_V(1U)
782
783#define TDCE_S 17
784#define TDCE_V(x) ((x) << TDCE_S)
785#define TDCE_F TDCE_V(1U)
786
787#define TDUE_S 16
788#define TDUE_V(x) ((x) << TDUE_S)
789#define TDUE_F TDUE_V(1U)
790
791/* registers for module MC */
792#define MC_INT_CAUSE_A 0x7518
793#define MC_P_INT_CAUSE_A 0x41318
794
795#define ECC_UE_INT_CAUSE_S 2
796#define ECC_UE_INT_CAUSE_V(x) ((x) << ECC_UE_INT_CAUSE_S)
797#define ECC_UE_INT_CAUSE_F ECC_UE_INT_CAUSE_V(1U)
798
799#define ECC_CE_INT_CAUSE_S 1
800#define ECC_CE_INT_CAUSE_V(x) ((x) << ECC_CE_INT_CAUSE_S)
801#define ECC_CE_INT_CAUSE_F ECC_CE_INT_CAUSE_V(1U)
802
803#define PERR_INT_CAUSE_S 0
804#define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S)
805#define PERR_INT_CAUSE_F PERR_INT_CAUSE_V(1U)
806
807#define MC_ECC_STATUS_A 0x751c
808#define MC_P_ECC_STATUS_A 0x4131c
809
810#define ECC_CECNT_S 16
811#define ECC_CECNT_M 0xffffU
812#define ECC_CECNT_V(x) ((x) << ECC_CECNT_S)
813#define ECC_CECNT_G(x) (((x) >> ECC_CECNT_S) & ECC_CECNT_M)
814
815#define ECC_UECNT_S 0
816#define ECC_UECNT_M 0xffffU
817#define ECC_UECNT_V(x) ((x) << ECC_UECNT_S)
818#define ECC_UECNT_G(x) (((x) >> ECC_UECNT_S) & ECC_UECNT_M)
819
820#define MC_BIST_CMD_A 0x7600
821
822#define START_BIST_S 31
823#define START_BIST_V(x) ((x) << START_BIST_S)
824#define START_BIST_F START_BIST_V(1U)
825
826#define BIST_CMD_GAP_S 8
827#define BIST_CMD_GAP_V(x) ((x) << BIST_CMD_GAP_S)
828
829#define BIST_OPCODE_S 0
830#define BIST_OPCODE_V(x) ((x) << BIST_OPCODE_S)
831
832#define MC_BIST_CMD_ADDR_A 0x7604
833#define MC_BIST_CMD_LEN_A 0x7608
834#define MC_BIST_DATA_PATTERN_A 0x760c
835
836#define MC_BIST_STATUS_RDATA_A 0x7688
837
838/* registers for module MA */
520#define MA_EDRAM0_BAR_A 0x77c0 839#define MA_EDRAM0_BAR_A 0x77c0
521 840
522#define EDRAM0_SIZE_S 0 841#define EDRAM0_SIZE_S 0
@@ -574,263 +893,608 @@
574#define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S) 893#define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S)
575#define EXT_MEM0_ENABLE_F EXT_MEM0_ENABLE_V(1U) 894#define EXT_MEM0_ENABLE_F EXT_MEM0_ENABLE_V(1U)
576 895
577#define MA_INT_CAUSE 0x77e0 896#define MA_INT_CAUSE_A 0x77e0
578#define MEM_PERR_INT_CAUSE 0x00000002U 897
579#define MEM_WRAP_INT_CAUSE 0x00000001U 898#define MEM_PERR_INT_CAUSE_S 1
580 899#define MEM_PERR_INT_CAUSE_V(x) ((x) << MEM_PERR_INT_CAUSE_S)
581#define MA_INT_WRAP_STATUS 0x77e4 900#define MEM_PERR_INT_CAUSE_F MEM_PERR_INT_CAUSE_V(1U)
582#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U 901
583#define MEM_WRAP_ADDRESS_SHIFT 4 902#define MEM_WRAP_INT_CAUSE_S 0
584#define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT) 903#define MEM_WRAP_INT_CAUSE_V(x) ((x) << MEM_WRAP_INT_CAUSE_S)
585#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU 904#define MEM_WRAP_INT_CAUSE_F MEM_WRAP_INT_CAUSE_V(1U)
586#define MEM_WRAP_CLIENT_NUM_SHIFT 0 905
587#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) 906#define MA_INT_WRAP_STATUS_A 0x77e4
588#define MA_PCIE_FW 0x30b8 907
589#define MA_PARITY_ERROR_STATUS 0x77f4 908#define MEM_WRAP_ADDRESS_S 4
590#define MA_PARITY_ERROR_STATUS2 0x7804 909#define MEM_WRAP_ADDRESS_M 0xfffffffU
591 910#define MEM_WRAP_ADDRESS_G(x) (((x) >> MEM_WRAP_ADDRESS_S) & MEM_WRAP_ADDRESS_M)
592#define EDC_0_BASE_ADDR 0x7900 911
593 912#define MEM_WRAP_CLIENT_NUM_S 0
594#define EDC_BIST_CMD 0x7904 913#define MEM_WRAP_CLIENT_NUM_M 0xfU
595#define EDC_BIST_CMD_ADDR 0x7908 914#define MEM_WRAP_CLIENT_NUM_G(x) \
596#define EDC_BIST_CMD_LEN 0x790c 915 (((x) >> MEM_WRAP_CLIENT_NUM_S) & MEM_WRAP_CLIENT_NUM_M)
597#define EDC_BIST_DATA_PATTERN 0x7910 916
598#define EDC_BIST_STATUS_RDATA 0x7928 917#define MA_PARITY_ERROR_STATUS_A 0x77f4
599#define EDC_INT_CAUSE 0x7978 918#define MA_PARITY_ERROR_STATUS1_A 0x77f4
600#define ECC_UE_PAR 0x00000020U 919#define MA_PARITY_ERROR_STATUS2_A 0x7804
601#define ECC_CE_PAR 0x00000010U 920
602#define PERR_PAR_CAUSE 0x00000008U 921/* registers for module EDC_0 */
603 922#define EDC_0_BASE_ADDR 0x7900
604#define EDC_ECC_STATUS 0x797c 923
605 924#define EDC_BIST_CMD_A 0x7904
606#define EDC_1_BASE_ADDR 0x7980 925#define EDC_BIST_CMD_ADDR_A 0x7908
607 926#define EDC_BIST_CMD_LEN_A 0x790c
608#define CIM_BOOT_CFG 0x7b00 927#define EDC_BIST_DATA_PATTERN_A 0x7910
609#define BOOTADDR_MASK 0xffffff00U 928#define EDC_BIST_STATUS_RDATA_A 0x7928
610#define UPCRST 0x1U 929#define EDC_INT_CAUSE_A 0x7978
611 930
612#define CIM_PF_MAILBOX_DATA 0x240 931#define ECC_UE_PAR_S 5
613#define CIM_PF_MAILBOX_CTRL 0x280 932#define ECC_UE_PAR_V(x) ((x) << ECC_UE_PAR_S)
614#define MBMSGVALID 0x00000008U 933#define ECC_UE_PAR_F ECC_UE_PAR_V(1U)
615#define MBINTREQ 0x00000004U 934
616#define MBOWNER_MASK 0x00000003U 935#define ECC_CE_PAR_S 4
617#define MBOWNER_SHIFT 0 936#define ECC_CE_PAR_V(x) ((x) << ECC_CE_PAR_S)
618#define MBOWNER(x) ((x) << MBOWNER_SHIFT) 937#define ECC_CE_PAR_F ECC_CE_PAR_V(1U)
619#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT) 938
620 939#define PERR_PAR_CAUSE_S 3
621#define CIM_PF_HOST_INT_ENABLE 0x288 940#define PERR_PAR_CAUSE_V(x) ((x) << PERR_PAR_CAUSE_S)
622#define MBMSGRDYINTEN(x) ((x) << 19) 941#define PERR_PAR_CAUSE_F PERR_PAR_CAUSE_V(1U)
623 942
624#define CIM_PF_HOST_INT_CAUSE 0x28c 943#define EDC_ECC_STATUS_A 0x797c
625#define MBMSGRDYINT 0x00080000U 944
626 945/* registers for module EDC_1 */
627#define CIM_HOST_INT_CAUSE 0x7b2c 946#define EDC_1_BASE_ADDR 0x7980
628#define TIEQOUTPARERRINT 0x00100000U 947
629#define TIEQINPARERRINT 0x00080000U 948/* registers for module CIM */
630#define MBHOSTPARERR 0x00040000U 949#define CIM_BOOT_CFG_A 0x7b00
631#define MBUPPARERR 0x00020000U 950#define CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A 0x290
632#define IBQPARERR 0x0001f800U 951
633#define IBQTP0PARERR 0x00010000U 952#define BOOTADDR_M 0xffffff00U
634#define IBQTP1PARERR 0x00008000U 953
635#define IBQULPPARERR 0x00004000U 954#define UPCRST_S 0
636#define IBQSGELOPARERR 0x00002000U 955#define UPCRST_V(x) ((x) << UPCRST_S)
637#define IBQSGEHIPARERR 0x00001000U 956#define UPCRST_F UPCRST_V(1U)
638#define IBQNCSIPARERR 0x00000800U 957
639#define OBQPARERR 0x000007e0U 958#define CIM_PF_MAILBOX_DATA_A 0x240
640#define OBQULP0PARERR 0x00000400U 959#define CIM_PF_MAILBOX_CTRL_A 0x280
641#define OBQULP1PARERR 0x00000200U 960
642#define OBQULP2PARERR 0x00000100U 961#define MBMSGVALID_S 3
643#define OBQULP3PARERR 0x00000080U 962#define MBMSGVALID_V(x) ((x) << MBMSGVALID_S)
644#define OBQSGEPARERR 0x00000040U 963#define MBMSGVALID_F MBMSGVALID_V(1U)
645#define OBQNCSIPARERR 0x00000020U 964
646#define PREFDROPINT 0x00000002U 965#define MBINTREQ_S 2
647#define UPACCNONZERO 0x00000001U 966#define MBINTREQ_V(x) ((x) << MBINTREQ_S)
648 967#define MBINTREQ_F MBINTREQ_V(1U)
649#define CIM_HOST_UPACC_INT_CAUSE 0x7b34 968
650#define EEPROMWRINT 0x40000000U 969#define MBOWNER_S 0
651#define TIMEOUTMAINT 0x20000000U 970#define MBOWNER_M 0x3U
652#define TIMEOUTINT 0x10000000U 971#define MBOWNER_V(x) ((x) << MBOWNER_S)
653#define RSPOVRLOOKUPINT 0x08000000U 972#define MBOWNER_G(x) (((x) >> MBOWNER_S) & MBOWNER_M)
654#define REQOVRLOOKUPINT 0x04000000U 973
655#define BLKWRPLINT 0x02000000U 974#define CIM_PF_HOST_INT_ENABLE_A 0x288
656#define BLKRDPLINT 0x01000000U 975
657#define SGLWRPLINT 0x00800000U 976#define MBMSGRDYINTEN_S 19
658#define SGLRDPLINT 0x00400000U 977#define MBMSGRDYINTEN_V(x) ((x) << MBMSGRDYINTEN_S)
659#define BLKWRCTLINT 0x00200000U 978#define MBMSGRDYINTEN_F MBMSGRDYINTEN_V(1U)
660#define BLKRDCTLINT 0x00100000U 979
661#define SGLWRCTLINT 0x00080000U 980#define CIM_PF_HOST_INT_CAUSE_A 0x28c
662#define SGLRDCTLINT 0x00040000U 981
663#define BLKWREEPROMINT 0x00020000U 982#define MBMSGRDYINT_S 19
664#define BLKRDEEPROMINT 0x00010000U 983#define MBMSGRDYINT_V(x) ((x) << MBMSGRDYINT_S)
665#define SGLWREEPROMINT 0x00008000U 984#define MBMSGRDYINT_F MBMSGRDYINT_V(1U)
666#define SGLRDEEPROMINT 0x00004000U 985
667#define BLKWRFLASHINT 0x00002000U 986#define CIM_HOST_INT_CAUSE_A 0x7b2c
668#define BLKRDFLASHINT 0x00001000U 987
669#define SGLWRFLASHINT 0x00000800U 988#define TIEQOUTPARERRINT_S 20
670#define SGLRDFLASHINT 0x00000400U 989#define TIEQOUTPARERRINT_V(x) ((x) << TIEQOUTPARERRINT_S)
671#define BLKWRBOOTINT 0x00000200U 990#define TIEQOUTPARERRINT_F TIEQOUTPARERRINT_V(1U)
672#define BLKRDBOOTINT 0x00000100U 991
673#define SGLWRBOOTINT 0x00000080U 992#define TIEQINPARERRINT_S 19
674#define SGLRDBOOTINT 0x00000040U 993#define TIEQINPARERRINT_V(x) ((x) << TIEQINPARERRINT_S)
675#define ILLWRBEINT 0x00000020U 994#define TIEQINPARERRINT_F TIEQINPARERRINT_V(1U)
676#define ILLRDBEINT 0x00000010U 995
677#define ILLRDINT 0x00000008U 996#define PREFDROPINT_S 1
678#define ILLWRINT 0x00000004U 997#define PREFDROPINT_V(x) ((x) << PREFDROPINT_S)
679#define ILLTRANSINT 0x00000002U 998#define PREFDROPINT_F PREFDROPINT_V(1U)
680#define RSVDSPACEINT 0x00000001U 999
681 1000#define UPACCNONZERO_S 0
682#define TP_OUT_CONFIG 0x7d04 1001#define UPACCNONZERO_V(x) ((x) << UPACCNONZERO_S)
683#define VLANEXTENABLE_MASK 0x0000f000U 1002#define UPACCNONZERO_F UPACCNONZERO_V(1U)
684#define VLANEXTENABLE_SHIFT 12 1003
685 1004#define MBHOSTPARERR_S 18
686#define TP_GLOBAL_CONFIG 0x7d08 1005#define MBHOSTPARERR_V(x) ((x) << MBHOSTPARERR_S)
687#define FIVETUPLELOOKUP_SHIFT 17 1006#define MBHOSTPARERR_F MBHOSTPARERR_V(1U)
688#define FIVETUPLELOOKUP_MASK 0x00060000U 1007
689#define FIVETUPLELOOKUP(x) ((x) << FIVETUPLELOOKUP_SHIFT) 1008#define MBUPPARERR_S 17
690#define FIVETUPLELOOKUP_GET(x) (((x) & FIVETUPLELOOKUP_MASK) >> \ 1009#define MBUPPARERR_V(x) ((x) << MBUPPARERR_S)
691 FIVETUPLELOOKUP_SHIFT) 1010#define MBUPPARERR_F MBUPPARERR_V(1U)
692 1011
693#define TP_PARA_REG2 0x7d68 1012#define IBQTP0PARERR_S 16
694#define MAXRXDATA_MASK 0xffff0000U 1013#define IBQTP0PARERR_V(x) ((x) << IBQTP0PARERR_S)
695#define MAXRXDATA_SHIFT 16 1014#define IBQTP0PARERR_F IBQTP0PARERR_V(1U)
696#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT) 1015
697 1016#define IBQTP1PARERR_S 15
698#define TP_TIMER_RESOLUTION 0x7d90 1017#define IBQTP1PARERR_V(x) ((x) << IBQTP1PARERR_S)
699#define TIMERRESOLUTION_MASK 0x00ff0000U 1018#define IBQTP1PARERR_F IBQTP1PARERR_V(1U)
700#define TIMERRESOLUTION_SHIFT 16 1019
701#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT) 1020#define IBQULPPARERR_S 14
702#define DELAYEDACKRESOLUTION_MASK 0x000000ffU 1021#define IBQULPPARERR_V(x) ((x) << IBQULPPARERR_S)
703#define DELAYEDACKRESOLUTION_SHIFT 0 1022#define IBQULPPARERR_F IBQULPPARERR_V(1U)
704#define DELAYEDACKRESOLUTION_GET(x) \ 1023
705 (((x) & DELAYEDACKRESOLUTION_MASK) >> DELAYEDACKRESOLUTION_SHIFT) 1024#define IBQSGELOPARERR_S 13
706 1025#define IBQSGELOPARERR_V(x) ((x) << IBQSGELOPARERR_S)
707#define TP_SHIFT_CNT 0x7dc0 1026#define IBQSGELOPARERR_F IBQSGELOPARERR_V(1U)
708#define SYNSHIFTMAX_SHIFT 24 1027
709#define SYNSHIFTMAX_MASK 0xff000000U 1028#define IBQSGEHIPARERR_S 12
710#define SYNSHIFTMAX(x) ((x) << SYNSHIFTMAX_SHIFT) 1029#define IBQSGEHIPARERR_V(x) ((x) << IBQSGEHIPARERR_S)
711#define SYNSHIFTMAX_GET(x) (((x) & SYNSHIFTMAX_MASK) >> \ 1030#define IBQSGEHIPARERR_F IBQSGEHIPARERR_V(1U)
712 SYNSHIFTMAX_SHIFT) 1031
713#define RXTSHIFTMAXR1_SHIFT 20 1032#define IBQNCSIPARERR_S 11
714#define RXTSHIFTMAXR1_MASK 0x00f00000U 1033#define IBQNCSIPARERR_V(x) ((x) << IBQNCSIPARERR_S)
715#define RXTSHIFTMAXR1(x) ((x) << RXTSHIFTMAXR1_SHIFT) 1034#define IBQNCSIPARERR_F IBQNCSIPARERR_V(1U)
716#define RXTSHIFTMAXR1_GET(x) (((x) & RXTSHIFTMAXR1_MASK) >> \ 1035
717 RXTSHIFTMAXR1_SHIFT) 1036#define OBQULP0PARERR_S 10
718#define RXTSHIFTMAXR2_SHIFT 16 1037#define OBQULP0PARERR_V(x) ((x) << OBQULP0PARERR_S)
719#define RXTSHIFTMAXR2_MASK 0x000f0000U 1038#define OBQULP0PARERR_F OBQULP0PARERR_V(1U)
720#define RXTSHIFTMAXR2(x) ((x) << RXTSHIFTMAXR2_SHIFT) 1039
721#define RXTSHIFTMAXR2_GET(x) (((x) & RXTSHIFTMAXR2_MASK) >> \ 1040#define OBQULP1PARERR_S 9
722 RXTSHIFTMAXR2_SHIFT) 1041#define OBQULP1PARERR_V(x) ((x) << OBQULP1PARERR_S)
723#define PERSHIFTBACKOFFMAX_SHIFT 12 1042#define OBQULP1PARERR_F OBQULP1PARERR_V(1U)
724#define PERSHIFTBACKOFFMAX_MASK 0x0000f000U 1043
725#define PERSHIFTBACKOFFMAX(x) ((x) << PERSHIFTBACKOFFMAX_SHIFT) 1044#define OBQULP2PARERR_S 8
726#define PERSHIFTBACKOFFMAX_GET(x) (((x) & PERSHIFTBACKOFFMAX_MASK) >> \ 1045#define OBQULP2PARERR_V(x) ((x) << OBQULP2PARERR_S)
727 PERSHIFTBACKOFFMAX_SHIFT) 1046#define OBQULP2PARERR_F OBQULP2PARERR_V(1U)
728#define PERSHIFTMAX_SHIFT 8 1047
729#define PERSHIFTMAX_MASK 0x00000f00U 1048#define OBQULP3PARERR_S 7
730#define PERSHIFTMAX(x) ((x) << PERSHIFTMAX_SHIFT) 1049#define OBQULP3PARERR_V(x) ((x) << OBQULP3PARERR_S)
731#define PERSHIFTMAX_GET(x) (((x) & PERSHIFTMAX_MASK) >> \ 1050#define OBQULP3PARERR_F OBQULP3PARERR_V(1U)
732 PERSHIFTMAX_SHIFT) 1051
733#define KEEPALIVEMAXR1_SHIFT 4 1052#define OBQSGEPARERR_S 6
734#define KEEPALIVEMAXR1_MASK 0x000000f0U 1053#define OBQSGEPARERR_V(x) ((x) << OBQSGEPARERR_S)
735#define KEEPALIVEMAXR1(x) ((x) << KEEPALIVEMAXR1_SHIFT) 1054#define OBQSGEPARERR_F OBQSGEPARERR_V(1U)
736#define KEEPALIVEMAXR1_GET(x) (((x) & KEEPALIVEMAXR1_MASK) >> \ 1055
737 KEEPALIVEMAXR1_SHIFT) 1056#define OBQNCSIPARERR_S 5
738#define KEEPALIVEMAXR2_SHIFT 0 1057#define OBQNCSIPARERR_V(x) ((x) << OBQNCSIPARERR_S)
739#define KEEPALIVEMAXR2_MASK 0x0000000fU 1058#define OBQNCSIPARERR_F OBQNCSIPARERR_V(1U)
740#define KEEPALIVEMAXR2(x) ((x) << KEEPALIVEMAXR2_SHIFT) 1059
741#define KEEPALIVEMAXR2_GET(x) (((x) & KEEPALIVEMAXR2_MASK) >> \ 1060#define CIM_HOST_UPACC_INT_CAUSE_A 0x7b34
742 KEEPALIVEMAXR2_SHIFT) 1061
743 1062#define EEPROMWRINT_S 30
744#define TP_CCTRL_TABLE 0x7ddc 1063#define EEPROMWRINT_V(x) ((x) << EEPROMWRINT_S)
745#define TP_MTU_TABLE 0x7de4 1064#define EEPROMWRINT_F EEPROMWRINT_V(1U)
746#define MTUINDEX_MASK 0xff000000U 1065
747#define MTUINDEX_SHIFT 24 1066#define TIMEOUTMAINT_S 29
748#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT) 1067#define TIMEOUTMAINT_V(x) ((x) << TIMEOUTMAINT_S)
749#define MTUWIDTH_MASK 0x000f0000U 1068#define TIMEOUTMAINT_F TIMEOUTMAINT_V(1U)
750#define MTUWIDTH_SHIFT 16 1069
751#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT) 1070#define TIMEOUTINT_S 28
752#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT) 1071#define TIMEOUTINT_V(x) ((x) << TIMEOUTINT_S)
753#define MTUVALUE_MASK 0x00003fffU 1072#define TIMEOUTINT_F TIMEOUTINT_V(1U)
754#define MTUVALUE_SHIFT 0 1073
755#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT) 1074#define RSPOVRLOOKUPINT_S 27
756#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT) 1075#define RSPOVRLOOKUPINT_V(x) ((x) << RSPOVRLOOKUPINT_S)
757 1076#define RSPOVRLOOKUPINT_F RSPOVRLOOKUPINT_V(1U)
758#define TP_RSS_LKP_TABLE 0x7dec 1077
759#define LKPTBLROWVLD 0x80000000U 1078#define REQOVRLOOKUPINT_S 26
760#define LKPTBLQUEUE1_MASK 0x000ffc00U 1079#define REQOVRLOOKUPINT_V(x) ((x) << REQOVRLOOKUPINT_S)
761#define LKPTBLQUEUE1_SHIFT 10 1080#define REQOVRLOOKUPINT_F REQOVRLOOKUPINT_V(1U)
762#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT) 1081
763#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT) 1082#define BLKWRPLINT_S 25
764#define LKPTBLQUEUE0_MASK 0x000003ffU 1083#define BLKWRPLINT_V(x) ((x) << BLKWRPLINT_S)
765#define LKPTBLQUEUE0_SHIFT 0 1084#define BLKWRPLINT_F BLKWRPLINT_V(1U)
766#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT) 1085
767#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT) 1086#define BLKRDPLINT_S 24
768 1087#define BLKRDPLINT_V(x) ((x) << BLKRDPLINT_S)
769#define TP_PIO_ADDR 0x7e40 1088#define BLKRDPLINT_F BLKRDPLINT_V(1U)
770#define TP_PIO_DATA 0x7e44 1089
771#define TP_MIB_INDEX 0x7e50 1090#define SGLWRPLINT_S 23
772#define TP_MIB_DATA 0x7e54 1091#define SGLWRPLINT_V(x) ((x) << SGLWRPLINT_S)
773#define TP_INT_CAUSE 0x7e74 1092#define SGLWRPLINT_F SGLWRPLINT_V(1U)
774#define FLMTXFLSTEMPTY 0x40000000U 1093
775 1094#define SGLRDPLINT_S 22
776#define TP_VLAN_PRI_MAP 0x140 1095#define SGLRDPLINT_V(x) ((x) << SGLRDPLINT_S)
777#define FRAGMENTATION_SHIFT 9 1096#define SGLRDPLINT_F SGLRDPLINT_V(1U)
778#define FRAGMENTATION_MASK 0x00000200U 1097
779#define MPSHITTYPE_MASK 0x00000100U 1098#define BLKWRCTLINT_S 21
780#define MACMATCH_MASK 0x00000080U 1099#define BLKWRCTLINT_V(x) ((x) << BLKWRCTLINT_S)
781#define ETHERTYPE_MASK 0x00000040U 1100#define BLKWRCTLINT_F BLKWRCTLINT_V(1U)
782#define PROTOCOL_MASK 0x00000020U 1101
783#define TOS_MASK 0x00000010U 1102#define BLKRDCTLINT_S 20
784#define VLAN_MASK 0x00000008U 1103#define BLKRDCTLINT_V(x) ((x) << BLKRDCTLINT_S)
785#define VNIC_ID_MASK 0x00000004U 1104#define BLKRDCTLINT_F BLKRDCTLINT_V(1U)
786#define PORT_MASK 0x00000002U 1105
787#define FCOE_SHIFT 0 1106#define SGLWRCTLINT_S 19
788#define FCOE_MASK 0x00000001U 1107#define SGLWRCTLINT_V(x) ((x) << SGLWRCTLINT_S)
789 1108#define SGLWRCTLINT_F SGLWRCTLINT_V(1U)
790#define TP_INGRESS_CONFIG 0x141 1109
791#define VNIC 0x00000800U 1110#define SGLRDCTLINT_S 18
792#define CSUM_HAS_PSEUDO_HDR 0x00000400U 1111#define SGLRDCTLINT_V(x) ((x) << SGLRDCTLINT_S)
793#define RM_OVLAN 0x00000200U 1112#define SGLRDCTLINT_F SGLRDCTLINT_V(1U)
794#define LOOKUPEVERYPKT 0x00000100U 1113
795 1114#define BLKWREEPROMINT_S 17
796#define TP_MIB_MAC_IN_ERR_0 0x0 1115#define BLKWREEPROMINT_V(x) ((x) << BLKWREEPROMINT_S)
797#define TP_MIB_TCP_OUT_RST 0xc 1116#define BLKWREEPROMINT_F BLKWREEPROMINT_V(1U)
798#define TP_MIB_TCP_IN_SEG_HI 0x10 1117
799#define TP_MIB_TCP_IN_SEG_LO 0x11 1118#define BLKRDEEPROMINT_S 16
800#define TP_MIB_TCP_OUT_SEG_HI 0x12 1119#define BLKRDEEPROMINT_V(x) ((x) << BLKRDEEPROMINT_S)
801#define TP_MIB_TCP_OUT_SEG_LO 0x13 1120#define BLKRDEEPROMINT_F BLKRDEEPROMINT_V(1U)
802#define TP_MIB_TCP_RXT_SEG_HI 0x14 1121
803#define TP_MIB_TCP_RXT_SEG_LO 0x15 1122#define SGLWREEPROMINT_S 15
804#define TP_MIB_TNL_CNG_DROP_0 0x18 1123#define SGLWREEPROMINT_V(x) ((x) << SGLWREEPROMINT_S)
805#define TP_MIB_TCP_V6IN_ERR_0 0x28 1124#define SGLWREEPROMINT_F SGLWREEPROMINT_V(1U)
806#define TP_MIB_TCP_V6OUT_RST 0x2c 1125
807#define TP_MIB_OFD_ARP_DROP 0x36 1126#define SGLRDEEPROMINT_S 14
808#define TP_MIB_TNL_DROP_0 0x44 1127#define SGLRDEEPROMINT_V(x) ((x) << SGLRDEEPROMINT_S)
809#define TP_MIB_OFD_VLN_DROP_0 0x58 1128#define SGLRDEEPROMINT_F SGLRDEEPROMINT_V(1U)
810 1129
811#define ULP_TX_INT_CAUSE 0x8dcc 1130#define BLKWRFLASHINT_S 13
812#define PBL_BOUND_ERR_CH3 0x80000000U 1131#define BLKWRFLASHINT_V(x) ((x) << BLKWRFLASHINT_S)
813#define PBL_BOUND_ERR_CH2 0x40000000U 1132#define BLKWRFLASHINT_F BLKWRFLASHINT_V(1U)
814#define PBL_BOUND_ERR_CH1 0x20000000U 1133
815#define PBL_BOUND_ERR_CH0 0x10000000U 1134#define BLKRDFLASHINT_S 12
816 1135#define BLKRDFLASHINT_V(x) ((x) << BLKRDFLASHINT_S)
817#define PM_RX_INT_CAUSE 0x8fdc 1136#define BLKRDFLASHINT_F BLKRDFLASHINT_V(1U)
818#define ZERO_E_CMD_ERROR 0x00400000U 1137
819#define PMRX_FRAMING_ERROR 0x003ffff0U 1138#define SGLWRFLASHINT_S 11
820#define OCSPI_PAR_ERROR 0x00000008U 1139#define SGLWRFLASHINT_V(x) ((x) << SGLWRFLASHINT_S)
821#define DB_OPTIONS_PAR_ERROR 0x00000004U 1140#define SGLWRFLASHINT_F SGLWRFLASHINT_V(1U)
822#define IESPI_PAR_ERROR 0x00000002U 1141
823#define E_PCMD_PAR_ERROR 0x00000001U 1142#define SGLRDFLASHINT_S 10
824 1143#define SGLRDFLASHINT_V(x) ((x) << SGLRDFLASHINT_S)
825#define PM_TX_INT_CAUSE 0x8ffc 1144#define SGLRDFLASHINT_F SGLRDFLASHINT_V(1U)
826#define PCMD_LEN_OVFL0 0x80000000U 1145
827#define PCMD_LEN_OVFL1 0x40000000U 1146#define BLKWRBOOTINT_S 9
828#define PCMD_LEN_OVFL2 0x20000000U 1147#define BLKWRBOOTINT_V(x) ((x) << BLKWRBOOTINT_S)
829#define ZERO_C_CMD_ERROR 0x10000000U 1148#define BLKWRBOOTINT_F BLKWRBOOTINT_V(1U)
830#define PMTX_FRAMING_ERROR 0x0ffffff0U 1149
831#define OESPI_PAR_ERROR 0x00000008U 1150#define BLKRDBOOTINT_S 8
832#define ICSPI_PAR_ERROR 0x00000002U 1151#define BLKRDBOOTINT_V(x) ((x) << BLKRDBOOTINT_S)
833#define C_PCMD_PAR_ERROR 0x00000001U 1152#define BLKRDBOOTINT_F BLKRDBOOTINT_V(1U)
1153
1154#define SGLWRBOOTINT_S 7
1155#define SGLWRBOOTINT_V(x) ((x) << SGLWRBOOTINT_S)
1156#define SGLWRBOOTINT_F SGLWRBOOTINT_V(1U)
1157
1158#define SGLRDBOOTINT_S 6
1159#define SGLRDBOOTINT_V(x) ((x) << SGLRDBOOTINT_S)
1160#define SGLRDBOOTINT_F SGLRDBOOTINT_V(1U)
1161
1162#define ILLWRBEINT_S 5
1163#define ILLWRBEINT_V(x) ((x) << ILLWRBEINT_S)
1164#define ILLWRBEINT_F ILLWRBEINT_V(1U)
1165
1166#define ILLRDBEINT_S 4
1167#define ILLRDBEINT_V(x) ((x) << ILLRDBEINT_S)
1168#define ILLRDBEINT_F ILLRDBEINT_V(1U)
1169
1170#define ILLRDINT_S 3
1171#define ILLRDINT_V(x) ((x) << ILLRDINT_S)
1172#define ILLRDINT_F ILLRDINT_V(1U)
1173
1174#define ILLWRINT_S 2
1175#define ILLWRINT_V(x) ((x) << ILLWRINT_S)
1176#define ILLWRINT_F ILLWRINT_V(1U)
1177
1178#define ILLTRANSINT_S 1
1179#define ILLTRANSINT_V(x) ((x) << ILLTRANSINT_S)
1180#define ILLTRANSINT_F ILLTRANSINT_V(1U)
1181
1182#define RSVDSPACEINT_S 0
1183#define RSVDSPACEINT_V(x) ((x) << RSVDSPACEINT_S)
1184#define RSVDSPACEINT_F RSVDSPACEINT_V(1U)
1185
1186/* registers for module TP */
1187#define DBGLAWHLF_S 23
1188#define DBGLAWHLF_V(x) ((x) << DBGLAWHLF_S)
1189#define DBGLAWHLF_F DBGLAWHLF_V(1U)
1190
1191#define DBGLAWPTR_S 16
1192#define DBGLAWPTR_M 0x7fU
1193#define DBGLAWPTR_G(x) (((x) >> DBGLAWPTR_S) & DBGLAWPTR_M)
1194
1195#define DBGLAENABLE_S 12
1196#define DBGLAENABLE_V(x) ((x) << DBGLAENABLE_S)
1197#define DBGLAENABLE_F DBGLAENABLE_V(1U)
1198
1199#define DBGLARPTR_S 0
1200#define DBGLARPTR_M 0x7fU
1201#define DBGLARPTR_V(x) ((x) << DBGLARPTR_S)
1202
1203#define TP_DBG_LA_DATAL_A 0x7ed8
1204#define TP_DBG_LA_CONFIG_A 0x7ed4
1205#define TP_OUT_CONFIG_A 0x7d04
1206#define TP_GLOBAL_CONFIG_A 0x7d08
1207
1208#define DBGLAMODE_S 14
1209#define DBGLAMODE_M 0x3U
1210#define DBGLAMODE_G(x) (((x) >> DBGLAMODE_S) & DBGLAMODE_M)
1211
1212#define FIVETUPLELOOKUP_S 17
1213#define FIVETUPLELOOKUP_M 0x3U
1214#define FIVETUPLELOOKUP_V(x) ((x) << FIVETUPLELOOKUP_S)
1215#define FIVETUPLELOOKUP_G(x) (((x) >> FIVETUPLELOOKUP_S) & FIVETUPLELOOKUP_M)
1216
1217#define TP_PARA_REG2_A 0x7d68
1218
1219#define MAXRXDATA_S 16
1220#define MAXRXDATA_M 0xffffU
1221#define MAXRXDATA_G(x) (((x) >> MAXRXDATA_S) & MAXRXDATA_M)
1222
1223#define TP_TIMER_RESOLUTION_A 0x7d90
1224
1225#define TIMERRESOLUTION_S 16
1226#define TIMERRESOLUTION_M 0xffU
1227#define TIMERRESOLUTION_G(x) (((x) >> TIMERRESOLUTION_S) & TIMERRESOLUTION_M)
1228
1229#define TIMESTAMPRESOLUTION_S 8
1230#define TIMESTAMPRESOLUTION_M 0xffU
1231#define TIMESTAMPRESOLUTION_G(x) \
1232 (((x) >> TIMESTAMPRESOLUTION_S) & TIMESTAMPRESOLUTION_M)
1233
1234#define DELAYEDACKRESOLUTION_S 0
1235#define DELAYEDACKRESOLUTION_M 0xffU
1236#define DELAYEDACKRESOLUTION_G(x) \
1237 (((x) >> DELAYEDACKRESOLUTION_S) & DELAYEDACKRESOLUTION_M)
1238
1239#define TP_SHIFT_CNT_A 0x7dc0
1240#define TP_RXT_MIN_A 0x7d98
1241#define TP_RXT_MAX_A 0x7d9c
1242#define TP_PERS_MIN_A 0x7da0
1243#define TP_PERS_MAX_A 0x7da4
1244#define TP_KEEP_IDLE_A 0x7da8
1245#define TP_KEEP_INTVL_A 0x7dac
1246#define TP_INIT_SRTT_A 0x7db0
1247#define TP_DACK_TIMER_A 0x7db4
1248#define TP_FINWAIT2_TIMER_A 0x7db8
1249
1250#define INITSRTT_S 0
1251#define INITSRTT_M 0xffffU
1252#define INITSRTT_G(x) (((x) >> INITSRTT_S) & INITSRTT_M)
1253
1254#define PERSMAX_S 0
1255#define PERSMAX_M 0x3fffffffU
1256#define PERSMAX_V(x) ((x) << PERSMAX_S)
1257#define PERSMAX_G(x) (((x) >> PERSMAX_S) & PERSMAX_M)
1258
1259#define SYNSHIFTMAX_S 24
1260#define SYNSHIFTMAX_M 0xffU
1261#define SYNSHIFTMAX_V(x) ((x) << SYNSHIFTMAX_S)
1262#define SYNSHIFTMAX_G(x) (((x) >> SYNSHIFTMAX_S) & SYNSHIFTMAX_M)
1263
1264#define RXTSHIFTMAXR1_S 20
1265#define RXTSHIFTMAXR1_M 0xfU
1266#define RXTSHIFTMAXR1_V(x) ((x) << RXTSHIFTMAXR1_S)
1267#define RXTSHIFTMAXR1_G(x) (((x) >> RXTSHIFTMAXR1_S) & RXTSHIFTMAXR1_M)
1268
1269#define RXTSHIFTMAXR2_S 16
1270#define RXTSHIFTMAXR2_M 0xfU
1271#define RXTSHIFTMAXR2_V(x) ((x) << RXTSHIFTMAXR2_S)
1272#define RXTSHIFTMAXR2_G(x) (((x) >> RXTSHIFTMAXR2_S) & RXTSHIFTMAXR2_M)
1273
1274#define PERSHIFTBACKOFFMAX_S 12
1275#define PERSHIFTBACKOFFMAX_M 0xfU
1276#define PERSHIFTBACKOFFMAX_V(x) ((x) << PERSHIFTBACKOFFMAX_S)
1277#define PERSHIFTBACKOFFMAX_G(x) \
1278 (((x) >> PERSHIFTBACKOFFMAX_S) & PERSHIFTBACKOFFMAX_M)
1279
1280#define PERSHIFTMAX_S 8
1281#define PERSHIFTMAX_M 0xfU
1282#define PERSHIFTMAX_V(x) ((x) << PERSHIFTMAX_S)
1283#define PERSHIFTMAX_G(x) (((x) >> PERSHIFTMAX_S) & PERSHIFTMAX_M)
1284
1285#define KEEPALIVEMAXR1_S 4
1286#define KEEPALIVEMAXR1_M 0xfU
1287#define KEEPALIVEMAXR1_V(x) ((x) << KEEPALIVEMAXR1_S)
1288#define KEEPALIVEMAXR1_G(x) (((x) >> KEEPALIVEMAXR1_S) & KEEPALIVEMAXR1_M)
1289
1290#define KEEPALIVEMAXR2_S 0
1291#define KEEPALIVEMAXR2_M 0xfU
1292#define KEEPALIVEMAXR2_V(x) ((x) << KEEPALIVEMAXR2_S)
1293#define KEEPALIVEMAXR2_G(x) (((x) >> KEEPALIVEMAXR2_S) & KEEPALIVEMAXR2_M)
1294
1295#define ROWINDEX_S 16
1296#define ROWINDEX_V(x) ((x) << ROWINDEX_S)
1297
1298#define TP_CCTRL_TABLE_A 0x7ddc
1299#define TP_MTU_TABLE_A 0x7de4
1300
1301#define MTUINDEX_S 24
1302#define MTUINDEX_V(x) ((x) << MTUINDEX_S)
1303
1304#define MTUWIDTH_S 16
1305#define MTUWIDTH_M 0xfU
1306#define MTUWIDTH_V(x) ((x) << MTUWIDTH_S)
1307#define MTUWIDTH_G(x) (((x) >> MTUWIDTH_S) & MTUWIDTH_M)
1308
1309#define MTUVALUE_S 0
1310#define MTUVALUE_M 0x3fffU
1311#define MTUVALUE_V(x) ((x) << MTUVALUE_S)
1312#define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M)
1313
1314#define TP_RSS_LKP_TABLE_A 0x7dec
1315
1316#define LKPTBLROWVLD_S 31
1317#define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S)
1318#define LKPTBLROWVLD_F LKPTBLROWVLD_V(1U)
1319
1320#define LKPTBLQUEUE1_S 10
1321#define LKPTBLQUEUE1_M 0x3ffU
1322#define LKPTBLQUEUE1_G(x) (((x) >> LKPTBLQUEUE1_S) & LKPTBLQUEUE1_M)
1323
1324#define LKPTBLQUEUE0_S 0
1325#define LKPTBLQUEUE0_M 0x3ffU
1326#define LKPTBLQUEUE0_G(x) (((x) >> LKPTBLQUEUE0_S) & LKPTBLQUEUE0_M)
1327
1328#define TP_PIO_ADDR_A 0x7e40
1329#define TP_PIO_DATA_A 0x7e44
1330#define TP_MIB_INDEX_A 0x7e50
1331#define TP_MIB_DATA_A 0x7e54
1332#define TP_INT_CAUSE_A 0x7e74
1333
1334#define FLMTXFLSTEMPTY_S 30
1335#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
1336#define FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U)
1337
1338#define TP_VLAN_PRI_MAP_A 0x140
1339
1340#define FRAGMENTATION_S 9
1341#define FRAGMENTATION_V(x) ((x) << FRAGMENTATION_S)
1342#define FRAGMENTATION_F FRAGMENTATION_V(1U)
1343
1344#define MPSHITTYPE_S 8
1345#define MPSHITTYPE_V(x) ((x) << MPSHITTYPE_S)
1346#define MPSHITTYPE_F MPSHITTYPE_V(1U)
1347
1348#define MACMATCH_S 7
1349#define MACMATCH_V(x) ((x) << MACMATCH_S)
1350#define MACMATCH_F MACMATCH_V(1U)
1351
1352#define ETHERTYPE_S 6
1353#define ETHERTYPE_V(x) ((x) << ETHERTYPE_S)
1354#define ETHERTYPE_F ETHERTYPE_V(1U)
1355
1356#define PROTOCOL_S 5
1357#define PROTOCOL_V(x) ((x) << PROTOCOL_S)
1358#define PROTOCOL_F PROTOCOL_V(1U)
1359
1360#define TOS_S 4
1361#define TOS_V(x) ((x) << TOS_S)
1362#define TOS_F TOS_V(1U)
1363
1364#define VLAN_S 3
1365#define VLAN_V(x) ((x) << VLAN_S)
1366#define VLAN_F VLAN_V(1U)
1367
1368#define VNIC_ID_S 2
1369#define VNIC_ID_V(x) ((x) << VNIC_ID_S)
1370#define VNIC_ID_F VNIC_ID_V(1U)
1371
1372#define PORT_S 1
1373#define PORT_V(x) ((x) << PORT_S)
1374#define PORT_F PORT_V(1U)
1375
1376#define FCOE_S 0
1377#define FCOE_V(x) ((x) << FCOE_S)
1378#define FCOE_F FCOE_V(1U)
1379
1380#define FILTERMODE_S 15
1381#define FILTERMODE_V(x) ((x) << FILTERMODE_S)
1382#define FILTERMODE_F FILTERMODE_V(1U)
1383
1384#define FCOEMASK_S 14
1385#define FCOEMASK_V(x) ((x) << FCOEMASK_S)
1386#define FCOEMASK_F FCOEMASK_V(1U)
1387
1388#define TP_INGRESS_CONFIG_A 0x141
1389
1390#define VNIC_S 11
1391#define VNIC_V(x) ((x) << VNIC_S)
1392#define VNIC_F VNIC_V(1U)
1393
1394#define CSUM_HAS_PSEUDO_HDR_S 10
1395#define CSUM_HAS_PSEUDO_HDR_V(x) ((x) << CSUM_HAS_PSEUDO_HDR_S)
1396#define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U)
1397
1398#define TP_MIB_MAC_IN_ERR_0_A 0x0
1399#define TP_MIB_TCP_OUT_RST_A 0xc
1400#define TP_MIB_TCP_IN_SEG_HI_A 0x10
1401#define TP_MIB_TCP_IN_SEG_LO_A 0x11
1402#define TP_MIB_TCP_OUT_SEG_HI_A 0x12
1403#define TP_MIB_TCP_OUT_SEG_LO_A 0x13
1404#define TP_MIB_TCP_RXT_SEG_HI_A 0x14
1405#define TP_MIB_TCP_RXT_SEG_LO_A 0x15
1406#define TP_MIB_TNL_CNG_DROP_0_A 0x18
1407#define TP_MIB_TCP_V6IN_ERR_0_A 0x28
1408#define TP_MIB_TCP_V6OUT_RST_A 0x2c
1409#define TP_MIB_OFD_ARP_DROP_A 0x36
1410#define TP_MIB_TNL_DROP_0_A 0x44
1411#define TP_MIB_OFD_VLN_DROP_0_A 0x58
1412
1413#define ULP_TX_INT_CAUSE_A 0x8dcc
1414
1415#define PBL_BOUND_ERR_CH3_S 31
1416#define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S)
1417#define PBL_BOUND_ERR_CH3_F PBL_BOUND_ERR_CH3_V(1U)
1418
1419#define PBL_BOUND_ERR_CH2_S 30
1420#define PBL_BOUND_ERR_CH2_V(x) ((x) << PBL_BOUND_ERR_CH2_S)
1421#define PBL_BOUND_ERR_CH2_F PBL_BOUND_ERR_CH2_V(1U)
1422
1423#define PBL_BOUND_ERR_CH1_S 29
1424#define PBL_BOUND_ERR_CH1_V(x) ((x) << PBL_BOUND_ERR_CH1_S)
1425#define PBL_BOUND_ERR_CH1_F PBL_BOUND_ERR_CH1_V(1U)
1426
1427#define PBL_BOUND_ERR_CH0_S 28
1428#define PBL_BOUND_ERR_CH0_V(x) ((x) << PBL_BOUND_ERR_CH0_S)
1429#define PBL_BOUND_ERR_CH0_F PBL_BOUND_ERR_CH0_V(1U)
1430
1431#define PM_RX_INT_CAUSE_A 0x8fdc
1432#define PM_RX_STAT_CONFIG_A 0x8fc8
1433#define PM_RX_STAT_COUNT_A 0x8fcc
1434#define PM_RX_STAT_LSB_A 0x8fd0
1435#define PM_RX_DBG_CTRL_A 0x8fd0
1436#define PM_RX_DBG_DATA_A 0x8fd4
1437#define PM_RX_DBG_STAT_MSB_A 0x10013
1438
1439#define PMRX_FRAMING_ERROR_F 0x003ffff0U
1440
1441#define ZERO_E_CMD_ERROR_S 22
1442#define ZERO_E_CMD_ERROR_V(x) ((x) << ZERO_E_CMD_ERROR_S)
1443#define ZERO_E_CMD_ERROR_F ZERO_E_CMD_ERROR_V(1U)
1444
1445#define OCSPI_PAR_ERROR_S 3
1446#define OCSPI_PAR_ERROR_V(x) ((x) << OCSPI_PAR_ERROR_S)
1447#define OCSPI_PAR_ERROR_F OCSPI_PAR_ERROR_V(1U)
1448
1449#define DB_OPTIONS_PAR_ERROR_S 2
1450#define DB_OPTIONS_PAR_ERROR_V(x) ((x) << DB_OPTIONS_PAR_ERROR_S)
1451#define DB_OPTIONS_PAR_ERROR_F DB_OPTIONS_PAR_ERROR_V(1U)
1452
1453#define IESPI_PAR_ERROR_S 1
1454#define IESPI_PAR_ERROR_V(x) ((x) << IESPI_PAR_ERROR_S)
1455#define IESPI_PAR_ERROR_F IESPI_PAR_ERROR_V(1U)
1456
1457#define PMRX_E_PCMD_PAR_ERROR_S 0
1458#define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S)
1459#define PMRX_E_PCMD_PAR_ERROR_F PMRX_E_PCMD_PAR_ERROR_V(1U)
1460
1461#define PM_TX_INT_CAUSE_A 0x8ffc
1462#define PM_TX_STAT_CONFIG_A 0x8fe8
1463#define PM_TX_STAT_COUNT_A 0x8fec
1464#define PM_TX_STAT_LSB_A 0x8ff0
1465#define PM_TX_DBG_CTRL_A 0x8ff0
1466#define PM_TX_DBG_DATA_A 0x8ff4
1467#define PM_TX_DBG_STAT_MSB_A 0x1001a
1468
1469#define PCMD_LEN_OVFL0_S 31
1470#define PCMD_LEN_OVFL0_V(x) ((x) << PCMD_LEN_OVFL0_S)
1471#define PCMD_LEN_OVFL0_F PCMD_LEN_OVFL0_V(1U)
1472
1473#define PCMD_LEN_OVFL1_S 30
1474#define PCMD_LEN_OVFL1_V(x) ((x) << PCMD_LEN_OVFL1_S)
1475#define PCMD_LEN_OVFL1_F PCMD_LEN_OVFL1_V(1U)
1476
1477#define PCMD_LEN_OVFL2_S 29
1478#define PCMD_LEN_OVFL2_V(x) ((x) << PCMD_LEN_OVFL2_S)
1479#define PCMD_LEN_OVFL2_F PCMD_LEN_OVFL2_V(1U)
1480
1481#define ZERO_C_CMD_ERROR_S 28
1482#define ZERO_C_CMD_ERROR_V(x) ((x) << ZERO_C_CMD_ERROR_S)
1483#define ZERO_C_CMD_ERROR_F ZERO_C_CMD_ERROR_V(1U)
1484
1485#define PMTX_FRAMING_ERROR_F 0x0ffffff0U
1486
1487#define OESPI_PAR_ERROR_S 3
1488#define OESPI_PAR_ERROR_V(x) ((x) << OESPI_PAR_ERROR_S)
1489#define OESPI_PAR_ERROR_F OESPI_PAR_ERROR_V(1U)
1490
1491#define ICSPI_PAR_ERROR_S 1
1492#define ICSPI_PAR_ERROR_V(x) ((x) << ICSPI_PAR_ERROR_S)
1493#define ICSPI_PAR_ERROR_F ICSPI_PAR_ERROR_V(1U)
1494
1495#define PMTX_C_PCMD_PAR_ERROR_S 0
1496#define PMTX_C_PCMD_PAR_ERROR_V(x) ((x) << PMTX_C_PCMD_PAR_ERROR_S)
1497#define PMTX_C_PCMD_PAR_ERROR_F PMTX_C_PCMD_PAR_ERROR_V(1U)
834 1498
835#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400 1499#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
836#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404 1500#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
@@ -959,41 +1623,57 @@
959#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c 1623#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
960#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 1624#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
961#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 1625#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
962#define MAC_PORT_CFG2 0x818
963#define MAC_PORT_MAGIC_MACID_LO 0x824 1626#define MAC_PORT_MAGIC_MACID_LO 0x824
964#define MAC_PORT_MAGIC_MACID_HI 0x828 1627#define MAC_PORT_MAGIC_MACID_HI 0x828
965#define MAC_PORT_EPIO_DATA0 0x8c0 1628
966#define MAC_PORT_EPIO_DATA1 0x8c4 1629#define MAC_PORT_EPIO_DATA0_A 0x8c0
967#define MAC_PORT_EPIO_DATA2 0x8c8 1630#define MAC_PORT_EPIO_DATA1_A 0x8c4
968#define MAC_PORT_EPIO_DATA3 0x8cc 1631#define MAC_PORT_EPIO_DATA2_A 0x8c8
969#define MAC_PORT_EPIO_OP 0x8d0 1632#define MAC_PORT_EPIO_DATA3_A 0x8cc
970 1633#define MAC_PORT_EPIO_OP_A 0x8d0
971#define MPS_CMN_CTL 0x9000 1634
972#define NUMPORTS_MASK 0x00000003U 1635#define MAC_PORT_CFG2_A 0x818
973#define NUMPORTS_SHIFT 0 1636
974#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT) 1637#define MPS_CMN_CTL_A 0x9000
975 1638
976#define MPS_INT_CAUSE 0x9008 1639#define NUMPORTS_S 0
977#define STATINT 0x00000020U 1640#define NUMPORTS_M 0x3U
978#define TXINT 0x00000010U 1641#define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M)
979#define RXINT 0x00000008U 1642
980#define TRCINT 0x00000004U 1643#define MPS_INT_CAUSE_A 0x9008
981#define CLSINT 0x00000002U 1644#define MPS_TX_INT_CAUSE_A 0x9408
982#define PLINT 0x00000001U 1645
983 1646#define FRMERR_S 15
984#define MPS_TX_INT_CAUSE 0x9408 1647#define FRMERR_V(x) ((x) << FRMERR_S)
985#define PORTERR 0x00010000U 1648#define FRMERR_F FRMERR_V(1U)
986#define FRMERR 0x00008000U 1649
987#define SECNTERR 0x00004000U 1650#define SECNTERR_S 14
988#define BUBBLE 0x00002000U 1651#define SECNTERR_V(x) ((x) << SECNTERR_S)
989#define TXDESCFIFO 0x00001e00U 1652#define SECNTERR_F SECNTERR_V(1U)
990#define TXDATAFIFO 0x000001e0U 1653
991#define NCSIFIFO 0x00000010U 1654#define BUBBLE_S 13
992#define TPFIFO 0x0000000fU 1655#define BUBBLE_V(x) ((x) << BUBBLE_S)
993 1656#define BUBBLE_F BUBBLE_V(1U)
994#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614 1657
995#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620 1658#define TXDESCFIFO_S 9
996#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c 1659#define TXDESCFIFO_M 0xfU
1660#define TXDESCFIFO_V(x) ((x) << TXDESCFIFO_S)
1661
1662#define TXDATAFIFO_S 5
1663#define TXDATAFIFO_M 0xfU
1664#define TXDATAFIFO_V(x) ((x) << TXDATAFIFO_S)
1665
1666#define NCSIFIFO_S 4
1667#define NCSIFIFO_V(x) ((x) << NCSIFIFO_S)
1668#define NCSIFIFO_F NCSIFIFO_V(1U)
1669
1670#define TPFIFO_S 0
1671#define TPFIFO_M 0xfU
1672#define TPFIFO_V(x) ((x) << TPFIFO_S)
1673
1674#define MPS_STAT_PERR_INT_CAUSE_SRAM_A 0x9614
1675#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A 0x9620
1676#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A 0x962c
997 1677
998#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640 1678#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
999#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644 1679#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
@@ -1027,294 +1707,851 @@
1027#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4 1707#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
1028#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 1708#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
1029#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc 1709#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
1030#define MPS_TRC_CFG 0x9800
1031#define TRCFIFOEMPTY 0x00000010U
1032#define TRCIGNOREDROPINPUT 0x00000008U
1033#define TRCKEEPDUPLICATES 0x00000004U
1034#define TRCEN 0x00000002U
1035#define TRCMULTIFILTER 0x00000001U
1036
1037#define MPS_TRC_RSS_CONTROL 0x9808
1038#define MPS_T5_TRC_RSS_CONTROL 0xa00c
1039#define RSSCONTROL_MASK 0x00ff0000U
1040#define RSSCONTROL_SHIFT 16
1041#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
1042#define QUEUENUMBER_MASK 0x0000ffffU
1043#define QUEUENUMBER_SHIFT 0
1044#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT)
1045
1046#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810
1047#define TFINVERTMATCH 0x01000000U
1048#define TFPKTTOOLARGE 0x00800000U
1049#define TFEN 0x00400000U
1050#define TFPORT_MASK 0x003c0000U
1051#define TFPORT_SHIFT 18
1052#define TFPORT(x) ((x) << TFPORT_SHIFT)
1053#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT)
1054#define TFDROP 0x00020000U
1055#define TFSOPEOPERR 0x00010000U
1056#define TFLENGTH_MASK 0x00001f00U
1057#define TFLENGTH_SHIFT 8
1058#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT)
1059#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT)
1060#define TFOFFSET_MASK 0x0000001fU
1061#define TFOFFSET_SHIFT 0
1062#define TFOFFSET(x) ((x) << TFOFFSET_SHIFT)
1063#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT)
1064
1065#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820
1066#define TFMINPKTSIZE_MASK 0x01ff0000U
1067#define TFMINPKTSIZE_SHIFT 16
1068#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT)
1069#define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT)
1070#define TFCAPTUREMAX_MASK 0x00003fffU
1071#define TFCAPTUREMAX_SHIFT 0
1072#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT)
1073#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT)
1074
1075#define MPS_TRC_INT_CAUSE 0x985c
1076#define MISCPERR 0x00000100U
1077#define PKTFIFO 0x000000f0U
1078#define FILTMEM 0x0000000fU
1079
1080#define MPS_TRC_FILTER0_MATCH 0x9c00
1081#define MPS_TRC_FILTER0_DONT_CARE 0x9c80
1082#define MPS_TRC_FILTER1_MATCH 0x9d00
1083#define MPS_CLS_INT_CAUSE 0xd028
1084#define PLERRENB 0x00000008U
1085#define HASHSRAM 0x00000004U
1086#define MATCHTCAM 0x00000002U
1087#define MATCHSRAM 0x00000001U
1088
1089#define MPS_RX_PERR_INT_CAUSE 0x11074
1090
1091#define CPL_INTR_CAUSE 0x19054
1092#define CIM_OP_MAP_PERR 0x00000020U
1093#define CIM_OVFL_ERROR 0x00000010U
1094#define TP_FRAMING_ERROR 0x00000008U
1095#define SGE_FRAMING_ERROR 0x00000004U
1096#define CIM_FRAMING_ERROR 0x00000002U
1097#define ZERO_SWITCH_ERROR 0x00000001U
1098
1099#define SMB_INT_CAUSE 0x19090
1100#define MSTTXFIFOPARINT 0x00200000U
1101#define MSTRXFIFOPARINT 0x00100000U
1102#define SLVFIFOPARINT 0x00080000U
1103
1104#define ULP_RX_INT_CAUSE 0x19158
1105#define ULP_RX_ISCSI_TAGMASK 0x19164
1106#define ULP_RX_ISCSI_PSZ 0x19168
1107#define HPZ3_MASK 0x0f000000U
1108#define HPZ3_SHIFT 24
1109#define HPZ3(x) ((x) << HPZ3_SHIFT)
1110#define HPZ2_MASK 0x000f0000U
1111#define HPZ2_SHIFT 16
1112#define HPZ2(x) ((x) << HPZ2_SHIFT)
1113#define HPZ1_MASK 0x00000f00U
1114#define HPZ1_SHIFT 8
1115#define HPZ1(x) ((x) << HPZ1_SHIFT)
1116#define HPZ0_MASK 0x0000000fU
1117#define HPZ0_SHIFT 0
1118#define HPZ0(x) ((x) << HPZ0_SHIFT)
1119
1120#define ULP_RX_TDDP_PSZ 0x19178
1121
1122#define SF_DATA 0x193f8
1123#define SF_OP 0x193fc
1124#define SF_BUSY 0x80000000U
1125#define SF_LOCK 0x00000010U
1126#define SF_CONT 0x00000008U
1127#define BYTECNT_MASK 0x00000006U
1128#define BYTECNT_SHIFT 1
1129#define BYTECNT(x) ((x) << BYTECNT_SHIFT)
1130#define OP_WR 0x00000001U
1131
1132#define PL_PF_INT_CAUSE 0x3c0
1133#define PFSW 0x00000008U
1134#define PFSGE 0x00000004U
1135#define PFCIM 0x00000002U
1136#define PFMPS 0x00000001U
1137
1138#define PL_PF_INT_ENABLE 0x3c4
1139#define PL_PF_CTL 0x3c8
1140#define SWINT 0x00000001U
1141
1142#define PL_WHOAMI 0x19400
1143#define SOURCEPF_MASK 0x00000700U
1144#define SOURCEPF_SHIFT 8
1145#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT)
1146#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT)
1147#define ISVF 0x00000080U
1148#define VFID_MASK 0x0000007fU
1149#define VFID_SHIFT 0
1150#define VFID(x) ((x) << VFID_SHIFT)
1151#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT)
1152
1153#define PL_INT_CAUSE 0x1940c
1154#define ULP_TX 0x08000000U
1155#define SGE 0x04000000U
1156#define HMA 0x02000000U
1157#define CPL_SWITCH 0x01000000U
1158#define ULP_RX 0x00800000U
1159#define PM_RX 0x00400000U
1160#define PM_TX 0x00200000U
1161#define MA 0x00100000U
1162#define TP 0x00080000U
1163#define LE 0x00040000U
1164#define EDC1 0x00020000U
1165#define EDC0 0x00010000U
1166#define MC 0x00008000U
1167#define PCIE 0x00004000U
1168#define PMU 0x00002000U
1169#define XGMAC_KR1 0x00001000U
1170#define XGMAC_KR0 0x00000800U
1171#define XGMAC1 0x00000400U
1172#define XGMAC0 0x00000200U
1173#define SMB 0x00000100U
1174#define SF 0x00000080U
1175#define PL 0x00000040U
1176#define NCSI 0x00000020U
1177#define MPS 0x00000010U
1178#define MI 0x00000008U
1179#define DBG 0x00000004U
1180#define I2CM 0x00000002U
1181#define CIM 0x00000001U
1182
1183#define MC1 0x31
1184#define PL_INT_ENABLE 0x19410
1185#define PL_INT_MAP0 0x19414
1186#define PL_RST 0x19428
1187#define PIORST 0x00000002U
1188#define PIORSTMODE 0x00000001U
1189
1190#define PL_PL_INT_CAUSE 0x19430
1191#define FATALPERR 0x00000010U
1192#define PERRVFID 0x00000001U
1193
1194#define PL_REV 0x1943c
1195
1196#define S_REV 0
1197#define M_REV 0xfU
1198#define V_REV(x) ((x) << S_REV)
1199#define G_REV(x) (((x) >> S_REV) & M_REV)
1200
1201#define LE_DB_CONFIG 0x19c04
1202#define HASHEN 0x00100000U
1203
1204#define LE_DB_SERVER_INDEX 0x19c18
1205#define LE_DB_ACT_CNT_IPV4 0x19c20
1206#define LE_DB_ACT_CNT_IPV6 0x19c24
1207
1208#define LE_DB_INT_CAUSE 0x19c3c
1209#define REQQPARERR 0x00010000U
1210#define UNKNOWNCMD 0x00008000U
1211#define PARITYERR 0x00000040U
1212#define LIPMISS 0x00000020U
1213#define LIP0 0x00000010U
1214
1215#define LE_DB_TID_HASHBASE 0x19df8
1216
1217#define NCSI_INT_CAUSE 0x1a0d8
1218#define CIM_DM_PRTY_ERR 0x00000100U
1219#define MPS_DM_PRTY_ERR 0x00000080U
1220#define TXFIFO_PRTY_ERR 0x00000002U
1221#define RXFIFO_PRTY_ERR 0x00000001U
1222
1223#define XGMAC_PORT_CFG2 0x1018
1224#define PATEN 0x00040000U
1225#define MAGICEN 0x00020000U
1226 1710
1227#define XGMAC_PORT_MAGIC_MACID_LO 0x1024 1711#define MPS_TRC_CFG_A 0x9800
1228#define XGMAC_PORT_MAGIC_MACID_HI 0x1028 1712
1713#define TRCFIFOEMPTY_S 4
1714#define TRCFIFOEMPTY_V(x) ((x) << TRCFIFOEMPTY_S)
1715#define TRCFIFOEMPTY_F TRCFIFOEMPTY_V(1U)
1716
1717#define TRCIGNOREDROPINPUT_S 3
1718#define TRCIGNOREDROPINPUT_V(x) ((x) << TRCIGNOREDROPINPUT_S)
1719#define TRCIGNOREDROPINPUT_F TRCIGNOREDROPINPUT_V(1U)
1720
1721#define TRCKEEPDUPLICATES_S 2
1722#define TRCKEEPDUPLICATES_V(x) ((x) << TRCKEEPDUPLICATES_S)
1723#define TRCKEEPDUPLICATES_F TRCKEEPDUPLICATES_V(1U)
1724
1725#define TRCEN_S 1
1726#define TRCEN_V(x) ((x) << TRCEN_S)
1727#define TRCEN_F TRCEN_V(1U)
1728
1729#define TRCMULTIFILTER_S 0
1730#define TRCMULTIFILTER_V(x) ((x) << TRCMULTIFILTER_S)
1731#define TRCMULTIFILTER_F TRCMULTIFILTER_V(1U)
1732
1733#define MPS_TRC_RSS_CONTROL_A 0x9808
1734#define MPS_T5_TRC_RSS_CONTROL_A 0xa00c
1735
1736#define RSSCONTROL_S 16
1737#define RSSCONTROL_V(x) ((x) << RSSCONTROL_S)
1738
1739#define QUEUENUMBER_S 0
1740#define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S)
1741
1742#define TP_RSS_CONFIG_A 0x7df0
1743
1744#define TNL4TUPENIPV6_S 31
1745#define TNL4TUPENIPV6_V(x) ((x) << TNL4TUPENIPV6_S)
1746#define TNL4TUPENIPV6_F TNL4TUPENIPV6_V(1U)
1747
1748#define TNL2TUPENIPV6_S 30
1749#define TNL2TUPENIPV6_V(x) ((x) << TNL2TUPENIPV6_S)
1750#define TNL2TUPENIPV6_F TNL2TUPENIPV6_V(1U)
1751
1752#define TNL4TUPENIPV4_S 29
1753#define TNL4TUPENIPV4_V(x) ((x) << TNL4TUPENIPV4_S)
1754#define TNL4TUPENIPV4_F TNL4TUPENIPV4_V(1U)
1755
1756#define TNL2TUPENIPV4_S 28
1757#define TNL2TUPENIPV4_V(x) ((x) << TNL2TUPENIPV4_S)
1758#define TNL2TUPENIPV4_F TNL2TUPENIPV4_V(1U)
1759
1760#define TNLTCPSEL_S 27
1761#define TNLTCPSEL_V(x) ((x) << TNLTCPSEL_S)
1762#define TNLTCPSEL_F TNLTCPSEL_V(1U)
1763
1764#define TNLIP6SEL_S 26
1765#define TNLIP6SEL_V(x) ((x) << TNLIP6SEL_S)
1766#define TNLIP6SEL_F TNLIP6SEL_V(1U)
1767
1768#define TNLVRTSEL_S 25
1769#define TNLVRTSEL_V(x) ((x) << TNLVRTSEL_S)
1770#define TNLVRTSEL_F TNLVRTSEL_V(1U)
1771
1772#define TNLMAPEN_S 24
1773#define TNLMAPEN_V(x) ((x) << TNLMAPEN_S)
1774#define TNLMAPEN_F TNLMAPEN_V(1U)
1775
1776#define OFDHASHSAVE_S 19
1777#define OFDHASHSAVE_V(x) ((x) << OFDHASHSAVE_S)
1778#define OFDHASHSAVE_F OFDHASHSAVE_V(1U)
1779
1780#define OFDVRTSEL_S 18
1781#define OFDVRTSEL_V(x) ((x) << OFDVRTSEL_S)
1782#define OFDVRTSEL_F OFDVRTSEL_V(1U)
1783
1784#define OFDMAPEN_S 17
1785#define OFDMAPEN_V(x) ((x) << OFDMAPEN_S)
1786#define OFDMAPEN_F OFDMAPEN_V(1U)
1787
1788#define OFDLKPEN_S 16
1789#define OFDLKPEN_V(x) ((x) << OFDLKPEN_S)
1790#define OFDLKPEN_F OFDLKPEN_V(1U)
1791
1792#define SYN4TUPENIPV6_S 15
1793#define SYN4TUPENIPV6_V(x) ((x) << SYN4TUPENIPV6_S)
1794#define SYN4TUPENIPV6_F SYN4TUPENIPV6_V(1U)
1795
1796#define SYN2TUPENIPV6_S 14
1797#define SYN2TUPENIPV6_V(x) ((x) << SYN2TUPENIPV6_S)
1798#define SYN2TUPENIPV6_F SYN2TUPENIPV6_V(1U)
1799
1800#define SYN4TUPENIPV4_S 13
1801#define SYN4TUPENIPV4_V(x) ((x) << SYN4TUPENIPV4_S)
1802#define SYN4TUPENIPV4_F SYN4TUPENIPV4_V(1U)
1803
1804#define SYN2TUPENIPV4_S 12
1805#define SYN2TUPENIPV4_V(x) ((x) << SYN2TUPENIPV4_S)
1806#define SYN2TUPENIPV4_F SYN2TUPENIPV4_V(1U)
1807
1808#define SYNIP6SEL_S 11
1809#define SYNIP6SEL_V(x) ((x) << SYNIP6SEL_S)
1810#define SYNIP6SEL_F SYNIP6SEL_V(1U)
1811
1812#define SYNVRTSEL_S 10
1813#define SYNVRTSEL_V(x) ((x) << SYNVRTSEL_S)
1814#define SYNVRTSEL_F SYNVRTSEL_V(1U)
1815
1816#define SYNMAPEN_S 9
1817#define SYNMAPEN_V(x) ((x) << SYNMAPEN_S)
1818#define SYNMAPEN_F SYNMAPEN_V(1U)
1819
1820#define SYNLKPEN_S 8
1821#define SYNLKPEN_V(x) ((x) << SYNLKPEN_S)
1822#define SYNLKPEN_F SYNLKPEN_V(1U)
1823
1824#define CHANNELENABLE_S 7
1825#define CHANNELENABLE_V(x) ((x) << CHANNELENABLE_S)
1826#define CHANNELENABLE_F CHANNELENABLE_V(1U)
1827
1828#define PORTENABLE_S 6
1829#define PORTENABLE_V(x) ((x) << PORTENABLE_S)
1830#define PORTENABLE_F PORTENABLE_V(1U)
1831
1832#define TNLALLLOOKUP_S 5
1833#define TNLALLLOOKUP_V(x) ((x) << TNLALLLOOKUP_S)
1834#define TNLALLLOOKUP_F TNLALLLOOKUP_V(1U)
1835
1836#define VIRTENABLE_S 4
1837#define VIRTENABLE_V(x) ((x) << VIRTENABLE_S)
1838#define VIRTENABLE_F VIRTENABLE_V(1U)
1839
1840#define CONGESTIONENABLE_S 3
1841#define CONGESTIONENABLE_V(x) ((x) << CONGESTIONENABLE_S)
1842#define CONGESTIONENABLE_F CONGESTIONENABLE_V(1U)
1843
1844#define HASHTOEPLITZ_S 2
1845#define HASHTOEPLITZ_V(x) ((x) << HASHTOEPLITZ_S)
1846#define HASHTOEPLITZ_F HASHTOEPLITZ_V(1U)
1847
1848#define UDPENABLE_S 1
1849#define UDPENABLE_V(x) ((x) << UDPENABLE_S)
1850#define UDPENABLE_F UDPENABLE_V(1U)
1851
1852#define DISABLE_S 0
1853#define DISABLE_V(x) ((x) << DISABLE_S)
1854#define DISABLE_F DISABLE_V(1U)
1855
1856#define TP_RSS_CONFIG_TNL_A 0x7df4
1857
1858#define MASKSIZE_S 28
1859#define MASKSIZE_M 0xfU
1860#define MASKSIZE_V(x) ((x) << MASKSIZE_S)
1861#define MASKSIZE_G(x) (((x) >> MASKSIZE_S) & MASKSIZE_M)
1862
1863#define MASKFILTER_S 16
1864#define MASKFILTER_M 0x7ffU
1865#define MASKFILTER_V(x) ((x) << MASKFILTER_S)
1866#define MASKFILTER_G(x) (((x) >> MASKFILTER_S) & MASKFILTER_M)
1867
1868#define USEWIRECH_S 0
1869#define USEWIRECH_V(x) ((x) << USEWIRECH_S)
1870#define USEWIRECH_F USEWIRECH_V(1U)
1871
1872#define HASHALL_S 2
1873#define HASHALL_V(x) ((x) << HASHALL_S)
1874#define HASHALL_F HASHALL_V(1U)
1875
1876#define HASHETH_S 1
1877#define HASHETH_V(x) ((x) << HASHETH_S)
1878#define HASHETH_F HASHETH_V(1U)
1879
1880#define TP_RSS_CONFIG_OFD_A 0x7df8
1881
1882#define RRCPLMAPEN_S 20
1883#define RRCPLMAPEN_V(x) ((x) << RRCPLMAPEN_S)
1884#define RRCPLMAPEN_F RRCPLMAPEN_V(1U)
1885
1886#define RRCPLQUEWIDTH_S 16
1887#define RRCPLQUEWIDTH_M 0xfU
1888#define RRCPLQUEWIDTH_V(x) ((x) << RRCPLQUEWIDTH_S)
1889#define RRCPLQUEWIDTH_G(x) (((x) >> RRCPLQUEWIDTH_S) & RRCPLQUEWIDTH_M)
1890
1891#define TP_RSS_CONFIG_SYN_A 0x7dfc
1892#define TP_RSS_CONFIG_VRT_A 0x7e00
1893
1894#define VFRDRG_S 25
1895#define VFRDRG_V(x) ((x) << VFRDRG_S)
1896#define VFRDRG_F VFRDRG_V(1U)
1897
1898#define VFRDEN_S 24
1899#define VFRDEN_V(x) ((x) << VFRDEN_S)
1900#define VFRDEN_F VFRDEN_V(1U)
1901
1902#define VFPERREN_S 23
1903#define VFPERREN_V(x) ((x) << VFPERREN_S)
1904#define VFPERREN_F VFPERREN_V(1U)
1905
1906#define KEYPERREN_S 22
1907#define KEYPERREN_V(x) ((x) << KEYPERREN_S)
1908#define KEYPERREN_F KEYPERREN_V(1U)
1909
1910#define DISABLEVLAN_S 21
1911#define DISABLEVLAN_V(x) ((x) << DISABLEVLAN_S)
1912#define DISABLEVLAN_F DISABLEVLAN_V(1U)
1913
1914#define ENABLEUP0_S 20
1915#define ENABLEUP0_V(x) ((x) << ENABLEUP0_S)
1916#define ENABLEUP0_F ENABLEUP0_V(1U)
1917
1918#define HASHDELAY_S 16
1919#define HASHDELAY_M 0xfU
1920#define HASHDELAY_V(x) ((x) << HASHDELAY_S)
1921#define HASHDELAY_G(x) (((x) >> HASHDELAY_S) & HASHDELAY_M)
1922
1923#define VFWRADDR_S 8
1924#define VFWRADDR_M 0x7fU
1925#define VFWRADDR_V(x) ((x) << VFWRADDR_S)
1926#define VFWRADDR_G(x) (((x) >> VFWRADDR_S) & VFWRADDR_M)
1927
1928#define KEYMODE_S 6
1929#define KEYMODE_M 0x3U
1930#define KEYMODE_V(x) ((x) << KEYMODE_S)
1931#define KEYMODE_G(x) (((x) >> KEYMODE_S) & KEYMODE_M)
1932
1933#define VFWREN_S 5
1934#define VFWREN_V(x) ((x) << VFWREN_S)
1935#define VFWREN_F VFWREN_V(1U)
1936
1937#define KEYWREN_S 4
1938#define KEYWREN_V(x) ((x) << KEYWREN_S)
1939#define KEYWREN_F KEYWREN_V(1U)
1940
1941#define KEYWRADDR_S 0
1942#define KEYWRADDR_M 0xfU
1943#define KEYWRADDR_V(x) ((x) << KEYWRADDR_S)
1944#define KEYWRADDR_G(x) (((x) >> KEYWRADDR_S) & KEYWRADDR_M)
1945
1946#define KEYWRADDRX_S 30
1947#define KEYWRADDRX_M 0x3U
1948#define KEYWRADDRX_V(x) ((x) << KEYWRADDRX_S)
1949#define KEYWRADDRX_G(x) (((x) >> KEYWRADDRX_S) & KEYWRADDRX_M)
1950
1951#define KEYEXTEND_S 26
1952#define KEYEXTEND_V(x) ((x) << KEYEXTEND_S)
1953#define KEYEXTEND_F KEYEXTEND_V(1U)
1954
1955#define LKPIDXSIZE_S 24
1956#define LKPIDXSIZE_M 0x3U
1957#define LKPIDXSIZE_V(x) ((x) << LKPIDXSIZE_S)
1958#define LKPIDXSIZE_G(x) (((x) >> LKPIDXSIZE_S) & LKPIDXSIZE_M)
1959
1960#define TP_RSS_VFL_CONFIG_A 0x3a
1961#define TP_RSS_VFH_CONFIG_A 0x3b
1962
1963#define ENABLEUDPHASH_S 31
1964#define ENABLEUDPHASH_V(x) ((x) << ENABLEUDPHASH_S)
1965#define ENABLEUDPHASH_F ENABLEUDPHASH_V(1U)
1966
1967#define VFUPEN_S 30
1968#define VFUPEN_V(x) ((x) << VFUPEN_S)
1969#define VFUPEN_F VFUPEN_V(1U)
1970
1971#define VFVLNEX_S 28
1972#define VFVLNEX_V(x) ((x) << VFVLNEX_S)
1973#define VFVLNEX_F VFVLNEX_V(1U)
1974
1975#define VFPRTEN_S 27
1976#define VFPRTEN_V(x) ((x) << VFPRTEN_S)
1977#define VFPRTEN_F VFPRTEN_V(1U)
1978
1979#define VFCHNEN_S 26
1980#define VFCHNEN_V(x) ((x) << VFCHNEN_S)
1981#define VFCHNEN_F VFCHNEN_V(1U)
1982
1983#define DEFAULTQUEUE_S 16
1984#define DEFAULTQUEUE_M 0x3ffU
1985#define DEFAULTQUEUE_G(x) (((x) >> DEFAULTQUEUE_S) & DEFAULTQUEUE_M)
1986
1987#define VFIP6TWOTUPEN_S 6
1988#define VFIP6TWOTUPEN_V(x) ((x) << VFIP6TWOTUPEN_S)
1989#define VFIP6TWOTUPEN_F VFIP6TWOTUPEN_V(1U)
1990
1991#define VFIP4FOURTUPEN_S 5
1992#define VFIP4FOURTUPEN_V(x) ((x) << VFIP4FOURTUPEN_S)
1993#define VFIP4FOURTUPEN_F VFIP4FOURTUPEN_V(1U)
1994
1995#define VFIP4TWOTUPEN_S 4
1996#define VFIP4TWOTUPEN_V(x) ((x) << VFIP4TWOTUPEN_S)
1997#define VFIP4TWOTUPEN_F VFIP4TWOTUPEN_V(1U)
1998
1999#define KEYINDEX_S 0
2000#define KEYINDEX_M 0xfU
2001#define KEYINDEX_G(x) (((x) >> KEYINDEX_S) & KEYINDEX_M)
2002
2003#define MAPENABLE_S 31
2004#define MAPENABLE_V(x) ((x) << MAPENABLE_S)
2005#define MAPENABLE_F MAPENABLE_V(1U)
2006
2007#define CHNENABLE_S 30
2008#define CHNENABLE_V(x) ((x) << CHNENABLE_S)
2009#define CHNENABLE_F CHNENABLE_V(1U)
2010
2011#define PRTENABLE_S 29
2012#define PRTENABLE_V(x) ((x) << PRTENABLE_S)
2013#define PRTENABLE_F PRTENABLE_V(1U)
2014
2015#define UDPFOURTUPEN_S 28
2016#define UDPFOURTUPEN_V(x) ((x) << UDPFOURTUPEN_S)
2017#define UDPFOURTUPEN_F UDPFOURTUPEN_V(1U)
2018
2019#define IP6FOURTUPEN_S 27
2020#define IP6FOURTUPEN_V(x) ((x) << IP6FOURTUPEN_S)
2021#define IP6FOURTUPEN_F IP6FOURTUPEN_V(1U)
1229 2022
1230#define XGMAC_PORT_EPIO_DATA0 0x10c0 2023#define IP6TWOTUPEN_S 26
1231#define XGMAC_PORT_EPIO_DATA1 0x10c4 2024#define IP6TWOTUPEN_V(x) ((x) << IP6TWOTUPEN_S)
1232#define XGMAC_PORT_EPIO_DATA2 0x10c8 2025#define IP6TWOTUPEN_F IP6TWOTUPEN_V(1U)
1233#define XGMAC_PORT_EPIO_DATA3 0x10cc
1234#define XGMAC_PORT_EPIO_OP 0x10d0
1235#define EPIOWR 0x00000100U
1236#define ADDRESS_MASK 0x000000ffU
1237#define ADDRESS_SHIFT 0
1238#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
1239 2026
1240#define MAC_PORT_INT_CAUSE 0x8dc 2027#define IP4FOURTUPEN_S 25
1241#define XGMAC_PORT_INT_CAUSE 0x10dc 2028#define IP4FOURTUPEN_V(x) ((x) << IP4FOURTUPEN_S)
2029#define IP4FOURTUPEN_F IP4FOURTUPEN_V(1U)
1242 2030
1243#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28 2031#define IP4TWOTUPEN_S 24
2032#define IP4TWOTUPEN_V(x) ((x) << IP4TWOTUPEN_S)
2033#define IP4TWOTUPEN_F IP4TWOTUPEN_V(1U)
1244 2034
1245#define A_TP_TX_MOD_CHANNEL_WEIGHT 0x7e34 2035#define IVFWIDTH_S 20
2036#define IVFWIDTH_M 0xfU
2037#define IVFWIDTH_V(x) ((x) << IVFWIDTH_S)
2038#define IVFWIDTH_G(x) (((x) >> IVFWIDTH_S) & IVFWIDTH_M)
1246 2039
1247#define S_TX_MOD_QUEUE_REQ_MAP 0 2040#define CH1DEFAULTQUEUE_S 10
1248#define M_TX_MOD_QUEUE_REQ_MAP 0xffffU 2041#define CH1DEFAULTQUEUE_M 0x3ffU
1249#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP) 2042#define CH1DEFAULTQUEUE_V(x) ((x) << CH1DEFAULTQUEUE_S)
2043#define CH1DEFAULTQUEUE_G(x) (((x) >> CH1DEFAULTQUEUE_S) & CH1DEFAULTQUEUE_M)
1250 2044
1251#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x7e30 2045#define CH0DEFAULTQUEUE_S 0
2046#define CH0DEFAULTQUEUE_M 0x3ffU
2047#define CH0DEFAULTQUEUE_V(x) ((x) << CH0DEFAULTQUEUE_S)
2048#define CH0DEFAULTQUEUE_G(x) (((x) >> CH0DEFAULTQUEUE_S) & CH0DEFAULTQUEUE_M)
1252 2049
1253#define S_TX_MODQ_WEIGHT3 24 2050#define VFLKPIDX_S 8
1254#define M_TX_MODQ_WEIGHT3 0xffU 2051#define VFLKPIDX_M 0xffU
1255#define V_TX_MODQ_WEIGHT3(x) ((x) << S_TX_MODQ_WEIGHT3) 2052#define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M)
1256 2053
1257#define S_TX_MODQ_WEIGHT2 16 2054#define TP_RSS_CONFIG_CNG_A 0x7e04
1258#define M_TX_MODQ_WEIGHT2 0xffU 2055#define TP_RSS_SECRET_KEY0_A 0x40
1259#define V_TX_MODQ_WEIGHT2(x) ((x) << S_TX_MODQ_WEIGHT2) 2056#define TP_RSS_PF0_CONFIG_A 0x30
2057#define TP_RSS_PF_MAP_A 0x38
2058#define TP_RSS_PF_MSK_A 0x39
1260 2059
1261#define S_TX_MODQ_WEIGHT1 8 2060#define PF1LKPIDX_S 3
1262#define M_TX_MODQ_WEIGHT1 0xffU
1263#define V_TX_MODQ_WEIGHT1(x) ((x) << S_TX_MODQ_WEIGHT1)
1264 2061
1265#define S_TX_MODQ_WEIGHT0 0 2062#define PF0LKPIDX_M 0x7U
1266#define M_TX_MODQ_WEIGHT0 0xffU
1267#define V_TX_MODQ_WEIGHT0(x) ((x) << S_TX_MODQ_WEIGHT0)
1268 2063
1269#define A_TP_TX_SCHED_HDR 0x23 2064#define PF1MSKSIZE_S 4
2065#define PF1MSKSIZE_M 0xfU
1270 2066
1271#define A_TP_TX_SCHED_FIFO 0x24 2067#define CHNCOUNT3_S 31
2068#define CHNCOUNT3_V(x) ((x) << CHNCOUNT3_S)
2069#define CHNCOUNT3_F CHNCOUNT3_V(1U)
1272 2070
1273#define A_TP_TX_SCHED_PCMD 0x25 2071#define CHNCOUNT2_S 30
2072#define CHNCOUNT2_V(x) ((x) << CHNCOUNT2_S)
2073#define CHNCOUNT2_F CHNCOUNT2_V(1U)
1274 2074
1275#define S_VNIC 11 2075#define CHNCOUNT1_S 29
1276#define V_VNIC(x) ((x) << S_VNIC) 2076#define CHNCOUNT1_V(x) ((x) << CHNCOUNT1_S)
1277#define F_VNIC V_VNIC(1U) 2077#define CHNCOUNT1_F CHNCOUNT1_V(1U)
1278 2078
1279#define S_FRAGMENTATION 9 2079#define CHNCOUNT0_S 28
1280#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION) 2080#define CHNCOUNT0_V(x) ((x) << CHNCOUNT0_S)
1281#define F_FRAGMENTATION V_FRAGMENTATION(1U) 2081#define CHNCOUNT0_F CHNCOUNT0_V(1U)
1282 2082
1283#define S_MPSHITTYPE 8 2083#define CHNUNDFLOW3_S 27
1284#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE) 2084#define CHNUNDFLOW3_V(x) ((x) << CHNUNDFLOW3_S)
1285#define F_MPSHITTYPE V_MPSHITTYPE(1U) 2085#define CHNUNDFLOW3_F CHNUNDFLOW3_V(1U)
1286 2086
1287#define S_MACMATCH 7 2087#define CHNUNDFLOW2_S 26
1288#define V_MACMATCH(x) ((x) << S_MACMATCH) 2088#define CHNUNDFLOW2_V(x) ((x) << CHNUNDFLOW2_S)
1289#define F_MACMATCH V_MACMATCH(1U) 2089#define CHNUNDFLOW2_F CHNUNDFLOW2_V(1U)
1290 2090
1291#define S_ETHERTYPE 6 2091#define CHNUNDFLOW1_S 25
1292#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE) 2092#define CHNUNDFLOW1_V(x) ((x) << CHNUNDFLOW1_S)
1293#define F_ETHERTYPE V_ETHERTYPE(1U) 2093#define CHNUNDFLOW1_F CHNUNDFLOW1_V(1U)
1294 2094
1295#define S_PROTOCOL 5 2095#define CHNUNDFLOW0_S 24
1296#define V_PROTOCOL(x) ((x) << S_PROTOCOL) 2096#define CHNUNDFLOW0_V(x) ((x) << CHNUNDFLOW0_S)
1297#define F_PROTOCOL V_PROTOCOL(1U) 2097#define CHNUNDFLOW0_F CHNUNDFLOW0_V(1U)
1298 2098
1299#define S_TOS 4 2099#define RSTCHN3_S 19
1300#define V_TOS(x) ((x) << S_TOS) 2100#define RSTCHN3_V(x) ((x) << RSTCHN3_S)
1301#define F_TOS V_TOS(1U) 2101#define RSTCHN3_F RSTCHN3_V(1U)
1302 2102
1303#define S_VLAN 3 2103#define RSTCHN2_S 18
1304#define V_VLAN(x) ((x) << S_VLAN) 2104#define RSTCHN2_V(x) ((x) << RSTCHN2_S)
1305#define F_VLAN V_VLAN(1U) 2105#define RSTCHN2_F RSTCHN2_V(1U)
1306 2106
1307#define S_VNIC_ID 2 2107#define RSTCHN1_S 17
1308#define V_VNIC_ID(x) ((x) << S_VNIC_ID) 2108#define RSTCHN1_V(x) ((x) << RSTCHN1_S)
1309#define F_VNIC_ID V_VNIC_ID(1U) 2109#define RSTCHN1_F RSTCHN1_V(1U)
1310 2110
1311#define S_PORT 1 2111#define RSTCHN0_S 16
1312#define V_PORT(x) ((x) << S_PORT) 2112#define RSTCHN0_V(x) ((x) << RSTCHN0_S)
1313#define F_PORT V_PORT(1U) 2113#define RSTCHN0_F RSTCHN0_V(1U)
1314 2114
1315#define S_FCOE 0 2115#define UPDVLD_S 15
1316#define V_FCOE(x) ((x) << S_FCOE) 2116#define UPDVLD_V(x) ((x) << UPDVLD_S)
1317#define F_FCOE V_FCOE(1U) 2117#define UPDVLD_F UPDVLD_V(1U)
2118
2119#define XOFF_S 14
2120#define XOFF_V(x) ((x) << XOFF_S)
2121#define XOFF_F XOFF_V(1U)
2122
2123#define UPDCHN3_S 13
2124#define UPDCHN3_V(x) ((x) << UPDCHN3_S)
2125#define UPDCHN3_F UPDCHN3_V(1U)
2126
2127#define UPDCHN2_S 12
2128#define UPDCHN2_V(x) ((x) << UPDCHN2_S)
2129#define UPDCHN2_F UPDCHN2_V(1U)
2130
2131#define UPDCHN1_S 11
2132#define UPDCHN1_V(x) ((x) << UPDCHN1_S)
2133#define UPDCHN1_F UPDCHN1_V(1U)
2134
2135#define UPDCHN0_S 10
2136#define UPDCHN0_V(x) ((x) << UPDCHN0_S)
2137#define UPDCHN0_F UPDCHN0_V(1U)
2138
2139#define QUEUE_S 0
2140#define QUEUE_M 0x3ffU
2141#define QUEUE_V(x) ((x) << QUEUE_S)
2142#define QUEUE_G(x) (((x) >> QUEUE_S) & QUEUE_M)
2143
2144#define MPS_TRC_INT_CAUSE_A 0x985c
2145
2146#define MISCPERR_S 8
2147#define MISCPERR_V(x) ((x) << MISCPERR_S)
2148#define MISCPERR_F MISCPERR_V(1U)
2149
2150#define PKTFIFO_S 4
2151#define PKTFIFO_M 0xfU
2152#define PKTFIFO_V(x) ((x) << PKTFIFO_S)
2153
2154#define FILTMEM_S 0
2155#define FILTMEM_M 0xfU
2156#define FILTMEM_V(x) ((x) << FILTMEM_S)
2157
2158#define MPS_CLS_INT_CAUSE_A 0xd028
2159
2160#define HASHSRAM_S 2
2161#define HASHSRAM_V(x) ((x) << HASHSRAM_S)
2162#define HASHSRAM_F HASHSRAM_V(1U)
2163
2164#define MATCHTCAM_S 1
2165#define MATCHTCAM_V(x) ((x) << MATCHTCAM_S)
2166#define MATCHTCAM_F MATCHTCAM_V(1U)
2167
2168#define MATCHSRAM_S 0
2169#define MATCHSRAM_V(x) ((x) << MATCHSRAM_S)
2170#define MATCHSRAM_F MATCHSRAM_V(1U)
2171
2172#define MPS_RX_PERR_INT_CAUSE_A 0x11074
2173
2174#define MPS_CLS_TCAM_Y_L_A 0xf000
2175#define MPS_CLS_TCAM_X_L_A 0xf008
2176
2177#define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16)
2178#define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
2179
2180#define MPS_CLS_TCAM_X_L(idx) (MPS_CLS_TCAM_X_L_A + (idx) * 16)
2181#define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
2182
2183#define MPS_CLS_SRAM_L_A 0xe000
2184#define MPS_CLS_SRAM_H_A 0xe004
2185
2186#define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8)
2187#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
2188
2189#define MPS_CLS_SRAM_H(idx) (MPS_CLS_SRAM_H_A + (idx) * 8)
2190#define NUM_MPS_CLS_SRAM_H_INSTANCES 336
2191
2192#define MULTILISTEN0_S 25
2193
2194#define REPLICATE_S 11
2195#define REPLICATE_V(x) ((x) << REPLICATE_S)
2196#define REPLICATE_F REPLICATE_V(1U)
2197
2198#define PF_S 8
2199#define PF_M 0x7U
2200#define PF_G(x) (((x) >> PF_S) & PF_M)
2201
2202#define VF_VALID_S 7
2203#define VF_VALID_V(x) ((x) << VF_VALID_S)
2204#define VF_VALID_F VF_VALID_V(1U)
2205
2206#define VF_S 0
2207#define VF_M 0x7fU
2208#define VF_G(x) (((x) >> VF_S) & VF_M)
2209
2210#define SRAM_PRIO3_S 22
2211#define SRAM_PRIO3_M 0x7U
2212#define SRAM_PRIO3_G(x) (((x) >> SRAM_PRIO3_S) & SRAM_PRIO3_M)
2213
2214#define SRAM_PRIO2_S 19
2215#define SRAM_PRIO2_M 0x7U
2216#define SRAM_PRIO2_G(x) (((x) >> SRAM_PRIO2_S) & SRAM_PRIO2_M)
2217
2218#define SRAM_PRIO1_S 16
2219#define SRAM_PRIO1_M 0x7U
2220#define SRAM_PRIO1_G(x) (((x) >> SRAM_PRIO1_S) & SRAM_PRIO1_M)
2221
2222#define SRAM_PRIO0_S 13
2223#define SRAM_PRIO0_M 0x7U
2224#define SRAM_PRIO0_G(x) (((x) >> SRAM_PRIO0_S) & SRAM_PRIO0_M)
2225
2226#define SRAM_VLD_S 12
2227#define SRAM_VLD_V(x) ((x) << SRAM_VLD_S)
2228#define SRAM_VLD_F SRAM_VLD_V(1U)
2229
2230#define PORTMAP_S 0
2231#define PORTMAP_M 0xfU
2232#define PORTMAP_G(x) (((x) >> PORTMAP_S) & PORTMAP_M)
2233
2234#define CPL_INTR_CAUSE_A 0x19054
2235
2236#define CIM_OP_MAP_PERR_S 5
2237#define CIM_OP_MAP_PERR_V(x) ((x) << CIM_OP_MAP_PERR_S)
2238#define CIM_OP_MAP_PERR_F CIM_OP_MAP_PERR_V(1U)
2239
2240#define CIM_OVFL_ERROR_S 4
2241#define CIM_OVFL_ERROR_V(x) ((x) << CIM_OVFL_ERROR_S)
2242#define CIM_OVFL_ERROR_F CIM_OVFL_ERROR_V(1U)
2243
2244#define TP_FRAMING_ERROR_S 3
2245#define TP_FRAMING_ERROR_V(x) ((x) << TP_FRAMING_ERROR_S)
2246#define TP_FRAMING_ERROR_F TP_FRAMING_ERROR_V(1U)
2247
2248#define SGE_FRAMING_ERROR_S 2
2249#define SGE_FRAMING_ERROR_V(x) ((x) << SGE_FRAMING_ERROR_S)
2250#define SGE_FRAMING_ERROR_F SGE_FRAMING_ERROR_V(1U)
2251
2252#define CIM_FRAMING_ERROR_S 1
2253#define CIM_FRAMING_ERROR_V(x) ((x) << CIM_FRAMING_ERROR_S)
2254#define CIM_FRAMING_ERROR_F CIM_FRAMING_ERROR_V(1U)
2255
2256#define ZERO_SWITCH_ERROR_S 0
2257#define ZERO_SWITCH_ERROR_V(x) ((x) << ZERO_SWITCH_ERROR_S)
2258#define ZERO_SWITCH_ERROR_F ZERO_SWITCH_ERROR_V(1U)
2259
2260#define SMB_INT_CAUSE_A 0x19090
2261
2262#define MSTTXFIFOPARINT_S 21
2263#define MSTTXFIFOPARINT_V(x) ((x) << MSTTXFIFOPARINT_S)
2264#define MSTTXFIFOPARINT_F MSTTXFIFOPARINT_V(1U)
2265
2266#define MSTRXFIFOPARINT_S 20
2267#define MSTRXFIFOPARINT_V(x) ((x) << MSTRXFIFOPARINT_S)
2268#define MSTRXFIFOPARINT_F MSTRXFIFOPARINT_V(1U)
2269
2270#define SLVFIFOPARINT_S 19
2271#define SLVFIFOPARINT_V(x) ((x) << SLVFIFOPARINT_S)
2272#define SLVFIFOPARINT_F SLVFIFOPARINT_V(1U)
2273
2274#define ULP_RX_INT_CAUSE_A 0x19158
2275#define ULP_RX_ISCSI_TAGMASK_A 0x19164
2276#define ULP_RX_ISCSI_PSZ_A 0x19168
2277#define ULP_RX_LA_CTL_A 0x1923c
2278#define ULP_RX_LA_RDPTR_A 0x19240
2279#define ULP_RX_LA_RDDATA_A 0x19244
2280#define ULP_RX_LA_WRPTR_A 0x19248
2281
2282#define HPZ3_S 24
2283#define HPZ3_V(x) ((x) << HPZ3_S)
2284
2285#define HPZ2_S 16
2286#define HPZ2_V(x) ((x) << HPZ2_S)
2287
2288#define HPZ1_S 8
2289#define HPZ1_V(x) ((x) << HPZ1_S)
2290
2291#define HPZ0_S 0
2292#define HPZ0_V(x) ((x) << HPZ0_S)
2293
2294#define ULP_RX_TDDP_PSZ_A 0x19178
2295
2296/* registers for module SF */
2297#define SF_DATA_A 0x193f8
2298#define SF_OP_A 0x193fc
2299
2300#define SF_BUSY_S 31
2301#define SF_BUSY_V(x) ((x) << SF_BUSY_S)
2302#define SF_BUSY_F SF_BUSY_V(1U)
2303
2304#define SF_LOCK_S 4
2305#define SF_LOCK_V(x) ((x) << SF_LOCK_S)
2306#define SF_LOCK_F SF_LOCK_V(1U)
2307
2308#define SF_CONT_S 3
2309#define SF_CONT_V(x) ((x) << SF_CONT_S)
2310#define SF_CONT_F SF_CONT_V(1U)
2311
2312#define BYTECNT_S 1
2313#define BYTECNT_V(x) ((x) << BYTECNT_S)
2314
2315#define OP_S 0
2316#define OP_V(x) ((x) << OP_S)
2317#define OP_F OP_V(1U)
2318
2319#define PL_PF_INT_CAUSE_A 0x3c0
2320
2321#define PFSW_S 3
2322#define PFSW_V(x) ((x) << PFSW_S)
2323#define PFSW_F PFSW_V(1U)
2324
2325#define PFCIM_S 1
2326#define PFCIM_V(x) ((x) << PFCIM_S)
2327#define PFCIM_F PFCIM_V(1U)
2328
2329#define PL_PF_INT_ENABLE_A 0x3c4
2330#define PL_PF_CTL_A 0x3c8
2331
2332#define PL_WHOAMI_A 0x19400
2333
2334#define SOURCEPF_S 8
2335#define SOURCEPF_M 0x7U
2336#define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M)
2337
2338#define PL_INT_CAUSE_A 0x1940c
2339
2340#define ULP_TX_S 27
2341#define ULP_TX_V(x) ((x) << ULP_TX_S)
2342#define ULP_TX_F ULP_TX_V(1U)
2343
2344#define SGE_S 26
2345#define SGE_V(x) ((x) << SGE_S)
2346#define SGE_F SGE_V(1U)
2347
2348#define CPL_SWITCH_S 24
2349#define CPL_SWITCH_V(x) ((x) << CPL_SWITCH_S)
2350#define CPL_SWITCH_F CPL_SWITCH_V(1U)
2351
2352#define ULP_RX_S 23
2353#define ULP_RX_V(x) ((x) << ULP_RX_S)
2354#define ULP_RX_F ULP_RX_V(1U)
2355
2356#define PM_RX_S 22
2357#define PM_RX_V(x) ((x) << PM_RX_S)
2358#define PM_RX_F PM_RX_V(1U)
2359
2360#define PM_TX_S 21
2361#define PM_TX_V(x) ((x) << PM_TX_S)
2362#define PM_TX_F PM_TX_V(1U)
2363
2364#define MA_S 20
2365#define MA_V(x) ((x) << MA_S)
2366#define MA_F MA_V(1U)
2367
2368#define TP_S 19
2369#define TP_V(x) ((x) << TP_S)
2370#define TP_F TP_V(1U)
2371
2372#define LE_S 18
2373#define LE_V(x) ((x) << LE_S)
2374#define LE_F LE_V(1U)
2375
2376#define EDC1_S 17
2377#define EDC1_V(x) ((x) << EDC1_S)
2378#define EDC1_F EDC1_V(1U)
2379
2380#define EDC0_S 16
2381#define EDC0_V(x) ((x) << EDC0_S)
2382#define EDC0_F EDC0_V(1U)
2383
2384#define MC_S 15
2385#define MC_V(x) ((x) << MC_S)
2386#define MC_F MC_V(1U)
2387
2388#define PCIE_S 14
2389#define PCIE_V(x) ((x) << PCIE_S)
2390#define PCIE_F PCIE_V(1U)
2391
2392#define XGMAC_KR1_S 12
2393#define XGMAC_KR1_V(x) ((x) << XGMAC_KR1_S)
2394#define XGMAC_KR1_F XGMAC_KR1_V(1U)
2395
2396#define XGMAC_KR0_S 11
2397#define XGMAC_KR0_V(x) ((x) << XGMAC_KR0_S)
2398#define XGMAC_KR0_F XGMAC_KR0_V(1U)
2399
2400#define XGMAC1_S 10
2401#define XGMAC1_V(x) ((x) << XGMAC1_S)
2402#define XGMAC1_F XGMAC1_V(1U)
2403
2404#define XGMAC0_S 9
2405#define XGMAC0_V(x) ((x) << XGMAC0_S)
2406#define XGMAC0_F XGMAC0_V(1U)
2407
2408#define SMB_S 8
2409#define SMB_V(x) ((x) << SMB_S)
2410#define SMB_F SMB_V(1U)
2411
2412#define SF_S 7
2413#define SF_V(x) ((x) << SF_S)
2414#define SF_F SF_V(1U)
2415
2416#define PL_S 6
2417#define PL_V(x) ((x) << PL_S)
2418#define PL_F PL_V(1U)
2419
2420#define NCSI_S 5
2421#define NCSI_V(x) ((x) << NCSI_S)
2422#define NCSI_F NCSI_V(1U)
2423
2424#define MPS_S 4
2425#define MPS_V(x) ((x) << MPS_S)
2426#define MPS_F MPS_V(1U)
2427
2428#define CIM_S 0
2429#define CIM_V(x) ((x) << CIM_S)
2430#define CIM_F CIM_V(1U)
2431
2432#define MC1_S 31
2433
2434#define PL_INT_ENABLE_A 0x19410
2435#define PL_INT_MAP0_A 0x19414
2436#define PL_RST_A 0x19428
2437
2438#define PIORST_S 1
2439#define PIORST_V(x) ((x) << PIORST_S)
2440#define PIORST_F PIORST_V(1U)
2441
2442#define PIORSTMODE_S 0
2443#define PIORSTMODE_V(x) ((x) << PIORSTMODE_S)
2444#define PIORSTMODE_F PIORSTMODE_V(1U)
2445
2446#define PL_PL_INT_CAUSE_A 0x19430
2447
2448#define FATALPERR_S 4
2449#define FATALPERR_V(x) ((x) << FATALPERR_S)
2450#define FATALPERR_F FATALPERR_V(1U)
2451
2452#define PERRVFID_S 0
2453#define PERRVFID_V(x) ((x) << PERRVFID_S)
2454#define PERRVFID_F PERRVFID_V(1U)
2455
2456#define PL_REV_A 0x1943c
2457
2458#define REV_S 0
2459#define REV_M 0xfU
2460#define REV_V(x) ((x) << REV_S)
2461#define REV_G(x) (((x) >> REV_S) & REV_M)
2462
2463#define LE_DB_INT_CAUSE_A 0x19c3c
2464
2465#define REQQPARERR_S 16
2466#define REQQPARERR_V(x) ((x) << REQQPARERR_S)
2467#define REQQPARERR_F REQQPARERR_V(1U)
2468
2469#define UNKNOWNCMD_S 15
2470#define UNKNOWNCMD_V(x) ((x) << UNKNOWNCMD_S)
2471#define UNKNOWNCMD_F UNKNOWNCMD_V(1U)
2472
2473#define PARITYERR_S 6
2474#define PARITYERR_V(x) ((x) << PARITYERR_S)
2475#define PARITYERR_F PARITYERR_V(1U)
2476
2477#define LIPMISS_S 5
2478#define LIPMISS_V(x) ((x) << LIPMISS_S)
2479#define LIPMISS_F LIPMISS_V(1U)
2480
2481#define LIP0_S 4
2482#define LIP0_V(x) ((x) << LIP0_S)
2483#define LIP0_F LIP0_V(1U)
2484
2485#define NCSI_INT_CAUSE_A 0x1a0d8
2486
2487#define CIM_DM_PRTY_ERR_S 8
2488#define CIM_DM_PRTY_ERR_V(x) ((x) << CIM_DM_PRTY_ERR_S)
2489#define CIM_DM_PRTY_ERR_F CIM_DM_PRTY_ERR_V(1U)
2490
2491#define MPS_DM_PRTY_ERR_S 7
2492#define MPS_DM_PRTY_ERR_V(x) ((x) << MPS_DM_PRTY_ERR_S)
2493#define MPS_DM_PRTY_ERR_F MPS_DM_PRTY_ERR_V(1U)
2494
2495#define TXFIFO_PRTY_ERR_S 1
2496#define TXFIFO_PRTY_ERR_V(x) ((x) << TXFIFO_PRTY_ERR_S)
2497#define TXFIFO_PRTY_ERR_F TXFIFO_PRTY_ERR_V(1U)
2498
2499#define RXFIFO_PRTY_ERR_S 0
2500#define RXFIFO_PRTY_ERR_V(x) ((x) << RXFIFO_PRTY_ERR_S)
2501#define RXFIFO_PRTY_ERR_F RXFIFO_PRTY_ERR_V(1U)
2502
2503#define XGMAC_PORT_CFG2_A 0x1018
2504
2505#define PATEN_S 18
2506#define PATEN_V(x) ((x) << PATEN_S)
2507#define PATEN_F PATEN_V(1U)
2508
2509#define MAGICEN_S 17
2510#define MAGICEN_V(x) ((x) << MAGICEN_S)
2511#define MAGICEN_F MAGICEN_V(1U)
2512
2513#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
2514#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
2515
2516#define XGMAC_PORT_EPIO_DATA0_A 0x10c0
2517#define XGMAC_PORT_EPIO_DATA1_A 0x10c4
2518#define XGMAC_PORT_EPIO_DATA2_A 0x10c8
2519#define XGMAC_PORT_EPIO_DATA3_A 0x10cc
2520#define XGMAC_PORT_EPIO_OP_A 0x10d0
2521
2522#define EPIOWR_S 8
2523#define EPIOWR_V(x) ((x) << EPIOWR_S)
2524#define EPIOWR_F EPIOWR_V(1U)
2525
2526#define ADDRESS_S 0
2527#define ADDRESS_V(x) ((x) << ADDRESS_S)
2528
2529#define MAC_PORT_INT_CAUSE_A 0x8dc
2530#define XGMAC_PORT_INT_CAUSE_A 0x10dc
2531
2532#define TP_TX_MOD_QUEUE_REQ_MAP_A 0x7e28
2533
2534#define TP_TX_MOD_QUEUE_WEIGHT0_A 0x7e30
2535#define TP_TX_MOD_CHANNEL_WEIGHT_A 0x7e34
2536
2537#define TX_MOD_QUEUE_REQ_MAP_S 0
2538#define TX_MOD_QUEUE_REQ_MAP_V(x) ((x) << TX_MOD_QUEUE_REQ_MAP_S)
2539
2540#define TX_MODQ_WEIGHT3_S 24
2541#define TX_MODQ_WEIGHT3_V(x) ((x) << TX_MODQ_WEIGHT3_S)
2542
2543#define TX_MODQ_WEIGHT2_S 16
2544#define TX_MODQ_WEIGHT2_V(x) ((x) << TX_MODQ_WEIGHT2_S)
2545
2546#define TX_MODQ_WEIGHT1_S 8
2547#define TX_MODQ_WEIGHT1_V(x) ((x) << TX_MODQ_WEIGHT1_S)
2548
2549#define TX_MODQ_WEIGHT0_S 0
2550#define TX_MODQ_WEIGHT0_V(x) ((x) << TX_MODQ_WEIGHT0_S)
2551
2552#define TP_TX_SCHED_HDR_A 0x23
2553#define TP_TX_SCHED_FIFO_A 0x24
2554#define TP_TX_SCHED_PCMD_A 0x25
1318 2555
1319#define NUM_MPS_CLS_SRAM_L_INSTANCES 336 2556#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
1320#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 2557#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
@@ -1329,62 +2566,149 @@
1329#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR) 2566#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
1330#define MC_REG(reg, idx) (reg + MC_STRIDE * idx) 2567#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
1331 2568
1332#define MC_P_BIST_CMD 0x41400 2569#define MC_P_BIST_CMD_A 0x41400
1333#define MC_P_BIST_CMD_ADDR 0x41404 2570#define MC_P_BIST_CMD_ADDR_A 0x41404
1334#define MC_P_BIST_CMD_LEN 0x41408 2571#define MC_P_BIST_CMD_LEN_A 0x41408
1335#define MC_P_BIST_DATA_PATTERN 0x4140c 2572#define MC_P_BIST_DATA_PATTERN_A 0x4140c
1336#define MC_P_BIST_STATUS_RDATA 0x41488 2573#define MC_P_BIST_STATUS_RDATA_A 0x41488
1337#define EDC_T50_BASE_ADDR 0x50000 2574
1338#define EDC_H_BIST_CMD 0x50004 2575#define EDC_T50_BASE_ADDR 0x50000
1339#define EDC_H_BIST_CMD_ADDR 0x50008 2576
1340#define EDC_H_BIST_CMD_LEN 0x5000c 2577#define EDC_H_BIST_CMD_A 0x50004
1341#define EDC_H_BIST_DATA_PATTERN 0x50010 2578#define EDC_H_BIST_CMD_ADDR_A 0x50008
1342#define EDC_H_BIST_STATUS_RDATA 0x50028 2579#define EDC_H_BIST_CMD_LEN_A 0x5000c
1343 2580#define EDC_H_BIST_DATA_PATTERN_A 0x50010
1344#define EDC_T51_BASE_ADDR 0x50800 2581#define EDC_H_BIST_STATUS_RDATA_A 0x50028
2582
2583#define EDC_T51_BASE_ADDR 0x50800
2584
1345#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) 2585#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
1346#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) 2586#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
1347 2587
1348#define A_PL_VF_REV 0x4 2588#define PL_VF_REV_A 0x4
1349#define A_PL_VF_WHOAMI 0x0 2589#define PL_VF_WHOAMI_A 0x0
1350#define A_PL_VF_REVISION 0x8 2590#define PL_VF_REVISION_A 0x8
1351 2591
1352#define S_CHIPID 4 2592/* registers for module CIM */
1353#define M_CHIPID 0xfU 2593#define CIM_HOST_ACC_CTRL_A 0x7b50
1354#define V_CHIPID(x) ((x) << S_CHIPID) 2594#define CIM_HOST_ACC_DATA_A 0x7b54
1355#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) 2595#define UP_UP_DBG_LA_CFG_A 0x140
2596#define UP_UP_DBG_LA_DATA_A 0x144
1356 2597
1357/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the 2598#define HOSTBUSY_S 17
1358 * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP 2599#define HOSTBUSY_V(x) ((x) << HOSTBUSY_S)
1359 * selects for a particular field being present. These fields, when present 2600#define HOSTBUSY_F HOSTBUSY_V(1U)
1360 * in the Compressed Filter Tuple, have the following widths in bits. 2601
1361 */ 2602#define HOSTWRITE_S 16
1362#define W_FT_FCOE 1 2603#define HOSTWRITE_V(x) ((x) << HOSTWRITE_S)
1363#define W_FT_PORT 3 2604#define HOSTWRITE_F HOSTWRITE_V(1U)
1364#define W_FT_VNIC_ID 17 2605
1365#define W_FT_VLAN 17 2606#define CIM_IBQ_DBG_CFG_A 0x7b60
1366#define W_FT_TOS 8 2607
1367#define W_FT_PROTOCOL 8 2608#define IBQDBGADDR_S 16
1368#define W_FT_ETHERTYPE 16 2609#define IBQDBGADDR_M 0xfffU
1369#define W_FT_MACMATCH 9 2610#define IBQDBGADDR_V(x) ((x) << IBQDBGADDR_S)
1370#define W_FT_MPSHITTYPE 3 2611#define IBQDBGADDR_G(x) (((x) >> IBQDBGADDR_S) & IBQDBGADDR_M)
1371#define W_FT_FRAGMENTATION 1 2612
1372 2613#define IBQDBGBUSY_S 1
1373/* Some of the Compressed Filter Tuple fields have internal structure. These 2614#define IBQDBGBUSY_V(x) ((x) << IBQDBGBUSY_S)
1374 * bit shifts/masks describe those structures. All shifts are relative to the 2615#define IBQDBGBUSY_F IBQDBGBUSY_V(1U)
1375 * base position of the fields within the Compressed Filter Tuple 2616
1376 */ 2617#define IBQDBGEN_S 0
1377#define S_FT_VLAN_VLD 16 2618#define IBQDBGEN_V(x) ((x) << IBQDBGEN_S)
1378#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD) 2619#define IBQDBGEN_F IBQDBGEN_V(1U)
1379#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U) 2620
2621#define CIM_OBQ_DBG_CFG_A 0x7b64
2622
2623#define OBQDBGADDR_S 16
2624#define OBQDBGADDR_M 0xfffU
2625#define OBQDBGADDR_V(x) ((x) << OBQDBGADDR_S)
2626#define OBQDBGADDR_G(x) (((x) >> OBQDBGADDR_S) & OBQDBGADDR_M)
2627
2628#define OBQDBGBUSY_S 1
2629#define OBQDBGBUSY_V(x) ((x) << OBQDBGBUSY_S)
2630#define OBQDBGBUSY_F OBQDBGBUSY_V(1U)
2631
2632#define OBQDBGEN_S 0
2633#define OBQDBGEN_V(x) ((x) << OBQDBGEN_S)
2634#define OBQDBGEN_F OBQDBGEN_V(1U)
2635
2636#define CIM_IBQ_DBG_DATA_A 0x7b68
2637#define CIM_OBQ_DBG_DATA_A 0x7b6c
2638
2639#define UPDBGLARDEN_S 1
2640#define UPDBGLARDEN_V(x) ((x) << UPDBGLARDEN_S)
2641#define UPDBGLARDEN_F UPDBGLARDEN_V(1U)
2642
2643#define UPDBGLAEN_S 0
2644#define UPDBGLAEN_V(x) ((x) << UPDBGLAEN_S)
2645#define UPDBGLAEN_F UPDBGLAEN_V(1U)
2646
2647#define UPDBGLARDPTR_S 2
2648#define UPDBGLARDPTR_M 0xfffU
2649#define UPDBGLARDPTR_V(x) ((x) << UPDBGLARDPTR_S)
2650
2651#define UPDBGLAWRPTR_S 16
2652#define UPDBGLAWRPTR_M 0xfffU
2653#define UPDBGLAWRPTR_G(x) (((x) >> UPDBGLAWRPTR_S) & UPDBGLAWRPTR_M)
2654
2655#define UPDBGLACAPTPCONLY_S 30
2656#define UPDBGLACAPTPCONLY_V(x) ((x) << UPDBGLACAPTPCONLY_S)
2657#define UPDBGLACAPTPCONLY_F UPDBGLACAPTPCONLY_V(1U)
2658
2659#define CIM_QUEUE_CONFIG_REF_A 0x7b48
2660#define CIM_QUEUE_CONFIG_CTRL_A 0x7b4c
2661
2662#define CIMQSIZE_S 24
2663#define CIMQSIZE_M 0x3fU
2664#define CIMQSIZE_G(x) (((x) >> CIMQSIZE_S) & CIMQSIZE_M)
2665
2666#define CIMQBASE_S 16
2667#define CIMQBASE_M 0x3fU
2668#define CIMQBASE_G(x) (((x) >> CIMQBASE_S) & CIMQBASE_M)
2669
2670#define QUEFULLTHRSH_S 0
2671#define QUEFULLTHRSH_M 0x1ffU
2672#define QUEFULLTHRSH_G(x) (((x) >> QUEFULLTHRSH_S) & QUEFULLTHRSH_M)
2673
2674#define UP_IBQ_0_RDADDR_A 0x10
2675#define UP_IBQ_0_SHADOW_RDADDR_A 0x280
2676#define UP_OBQ_0_REALADDR_A 0x104
2677#define UP_OBQ_0_SHADOW_REALADDR_A 0x394
2678
2679#define IBQRDADDR_S 0
2680#define IBQRDADDR_M 0x1fffU
2681#define IBQRDADDR_G(x) (((x) >> IBQRDADDR_S) & IBQRDADDR_M)
2682
2683#define IBQWRADDR_S 0
2684#define IBQWRADDR_M 0x1fffU
2685#define IBQWRADDR_G(x) (((x) >> IBQWRADDR_S) & IBQWRADDR_M)
2686
2687#define QUERDADDR_S 0
2688#define QUERDADDR_M 0x7fffU
2689#define QUERDADDR_G(x) (((x) >> QUERDADDR_S) & QUERDADDR_M)
2690
2691#define QUEREMFLITS_S 0
2692#define QUEREMFLITS_M 0x7ffU
2693#define QUEREMFLITS_G(x) (((x) >> QUEREMFLITS_S) & QUEREMFLITS_M)
2694
2695#define QUEEOPCNT_S 16
2696#define QUEEOPCNT_M 0xfffU
2697#define QUEEOPCNT_G(x) (((x) >> QUEEOPCNT_S) & QUEEOPCNT_M)
2698
2699#define QUESOPCNT_S 0
2700#define QUESOPCNT_M 0xfffU
2701#define QUESOPCNT_G(x) (((x) >> QUESOPCNT_S) & QUESOPCNT_M)
1380 2702
1381#define S_FT_VNID_ID_VF 0 2703#define OBQSELECT_S 4
1382#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF) 2704#define OBQSELECT_V(x) ((x) << OBQSELECT_S)
2705#define OBQSELECT_F OBQSELECT_V(1U)
1383 2706
1384#define S_FT_VNID_ID_PF 7 2707#define IBQSELECT_S 3
1385#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF) 2708#define IBQSELECT_V(x) ((x) << IBQSELECT_S)
2709#define IBQSELECT_F IBQSELECT_V(1U)
1386 2710
1387#define S_FT_VNID_ID_VLD 16 2711#define QUENUMSELECT_S 0
1388#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD) 2712#define QUENUMSELECT_V(x) ((x) << QUENUMSELECT_S)
1389 2713
1390#endif /* __T4_REGS_H */ 2714#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
new file mode 100644
index 000000000000..19b2dcf6acde
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -0,0 +1,124 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __T4_VALUES_H__
36#define __T4_VALUES_H__
37
38/* This file contains definitions for various T4 register value hardware
39 * constants. The types of values encoded here are predominantly those for
40 * register fields which control "modal" behavior. For the most part, we do
41 * not include definitions for register fields which are simple numeric
42 * metrics, etc.
43 */
44
45/* SGE register field values.
46 */
47
48/* CONTROL1 register */
49#define RXPKTCPLMODE_SPLIT_X 1
50
51#define INGPCIEBOUNDARY_SHIFT_X 5
52#define INGPCIEBOUNDARY_32B_X 0
53
54#define INGPADBOUNDARY_SHIFT_X 5
55
56/* CONTROL2 register */
57#define INGPACKBOUNDARY_SHIFT_X 5
58#define INGPACKBOUNDARY_16B_X 0
59
60/* GTS register */
61#define SGE_TIMERREGS 6
62#define TIMERREG_COUNTER0_X 0
63
64/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
65 * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
66 * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
67 * (IDXSIZE_UNIT_X) Gather Buffer interface at offset 64. For Ingress Queues,
68 * we have a Going To Sleep register at offsets 8x+4.
69 *
70 * As noted above, we have many instances of the Simple Doorbell and Going To
71 * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a
72 * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
73 * avoid buffering of the writes to the Simple Doorbell and we want to use a
74 * non-contiguous offset for the Going To Sleep writes in order to avoid
75 * possible combining between them.
76 */
77#define SGE_UDB_SIZE 128
78#define SGE_UDB_KDOORBELL 8
79#define SGE_UDB_GTS 20
80#define SGE_UDB_WCDOORBELL 64
81
82/* CIM register field values.
83 */
84#define X_MBOWNER_FW 1
85#define X_MBOWNER_PL 2
86
87/* PCI-E definitions */
88#define WINDOW_SHIFT_X 10
89#define PCIEOFST_SHIFT_X 10
90
91/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
92 * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
93 * selects for a particular field being present. These fields, when present
94 * in the Compressed Filter Tuple, have the following widths in bits.
95 */
96#define FT_FCOE_W 1
97#define FT_PORT_W 3
98#define FT_VNIC_ID_W 17
99#define FT_VLAN_W 17
100#define FT_TOS_W 8
101#define FT_PROTOCOL_W 8
102#define FT_ETHERTYPE_W 16
103#define FT_MACMATCH_W 9
104#define FT_MPSHITTYPE_W 3
105#define FT_FRAGMENTATION_W 1
106
107/* Some of the Compressed Filter Tuple fields have internal structure. These
108 * bit shifts/masks describe those structures. All shifts are relative to the
109 * base position of the fields within the Compressed Filter Tuple
110 */
111#define FT_VLAN_VLD_S 16
112#define FT_VLAN_VLD_V(x) ((x) << FT_VLAN_VLD_S)
113#define FT_VLAN_VLD_F FT_VLAN_VLD_V(1U)
114
115#define FT_VNID_ID_VF_S 0
116#define FT_VNID_ID_VF_V(x) ((x) << FT_VNID_ID_VF_S)
117
118#define FT_VNID_ID_PF_S 7
119#define FT_VNID_ID_PF_V(x) ((x) << FT_VNID_ID_PF_S)
120
121#define FT_VNID_ID_VLD_S 16
122#define FT_VNID_ID_VLD_V(x) ((x) << FT_VNID_ID_VLD_S)
123
124#endif /* __T4_VALUES_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 7c0aec85137a..9b353a88cbda 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -673,6 +673,7 @@ enum fw_cmd_opcodes {
673 FW_RSS_IND_TBL_CMD = 0x20, 673 FW_RSS_IND_TBL_CMD = 0x20,
674 FW_RSS_GLB_CONFIG_CMD = 0x22, 674 FW_RSS_GLB_CONFIG_CMD = 0x22,
675 FW_RSS_VI_CONFIG_CMD = 0x23, 675 FW_RSS_VI_CONFIG_CMD = 0x23,
676 FW_DEVLOG_CMD = 0x25,
676 FW_CLIP_CMD = 0x28, 677 FW_CLIP_CMD = 0x28,
677 FW_LASTC2E_CMD = 0x40, 678 FW_LASTC2E_CMD = 0x40,
678 FW_ERROR_CMD = 0x80, 679 FW_ERROR_CMD = 0x80,
@@ -1058,9 +1059,11 @@ enum fw_params_param_dev {
1058 FW_PARAMS_PARAM_DEV_FWREV = 0x0B, 1059 FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
1059 FW_PARAMS_PARAM_DEV_TPREV = 0x0C, 1060 FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
1060 FW_PARAMS_PARAM_DEV_CF = 0x0D, 1061 FW_PARAMS_PARAM_DEV_CF = 0x0D,
1062 FW_PARAMS_PARAM_DEV_DIAG = 0x11,
1061 FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */ 1063 FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
1062 FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */ 1064 FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
1063 FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, 1065 FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
1066 FW_PARAMS_PARAM_DEV_FWCACHE = 0x18,
1064}; 1067};
1065 1068
1066/* 1069/*
@@ -1120,6 +1123,16 @@ enum fw_params_param_dmaq {
1120 FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13, 1123 FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
1121}; 1124};
1122 1125
1126enum fw_params_param_dev_diag {
1127 FW_PARAM_DEV_DIAG_TMP = 0x00,
1128 FW_PARAM_DEV_DIAG_VDD = 0x01,
1129};
1130
1131enum fw_params_param_dev_fwcache {
1132 FW_PARAM_DEV_FWCACHE_FLUSH = 0x00,
1133 FW_PARAM_DEV_FWCACHE_FLUSHINV = 0x01,
1134};
1135
1123#define FW_PARAMS_MNEM_S 24 1136#define FW_PARAMS_MNEM_S 24
1124#define FW_PARAMS_MNEM_V(x) ((x) << FW_PARAMS_MNEM_S) 1137#define FW_PARAMS_MNEM_V(x) ((x) << FW_PARAMS_MNEM_S)
1125 1138
@@ -3005,21 +3018,29 @@ enum fw_hdr_chip {
3005 3018
3006#define FW_HDR_FW_VER_MAJOR_S 24 3019#define FW_HDR_FW_VER_MAJOR_S 24
3007#define FW_HDR_FW_VER_MAJOR_M 0xff 3020#define FW_HDR_FW_VER_MAJOR_M 0xff
3021#define FW_HDR_FW_VER_MAJOR_V(x) \
3022 ((x) << FW_HDR_FW_VER_MAJOR_S)
3008#define FW_HDR_FW_VER_MAJOR_G(x) \ 3023#define FW_HDR_FW_VER_MAJOR_G(x) \
3009 (((x) >> FW_HDR_FW_VER_MAJOR_S) & FW_HDR_FW_VER_MAJOR_M) 3024 (((x) >> FW_HDR_FW_VER_MAJOR_S) & FW_HDR_FW_VER_MAJOR_M)
3010 3025
3011#define FW_HDR_FW_VER_MINOR_S 16 3026#define FW_HDR_FW_VER_MINOR_S 16
3012#define FW_HDR_FW_VER_MINOR_M 0xff 3027#define FW_HDR_FW_VER_MINOR_M 0xff
3028#define FW_HDR_FW_VER_MINOR_V(x) \
3029 ((x) << FW_HDR_FW_VER_MINOR_S)
3013#define FW_HDR_FW_VER_MINOR_G(x) \ 3030#define FW_HDR_FW_VER_MINOR_G(x) \
3014 (((x) >> FW_HDR_FW_VER_MINOR_S) & FW_HDR_FW_VER_MINOR_M) 3031 (((x) >> FW_HDR_FW_VER_MINOR_S) & FW_HDR_FW_VER_MINOR_M)
3015 3032
3016#define FW_HDR_FW_VER_MICRO_S 8 3033#define FW_HDR_FW_VER_MICRO_S 8
3017#define FW_HDR_FW_VER_MICRO_M 0xff 3034#define FW_HDR_FW_VER_MICRO_M 0xff
3035#define FW_HDR_FW_VER_MICRO_V(x) \
3036 ((x) << FW_HDR_FW_VER_MICRO_S)
3018#define FW_HDR_FW_VER_MICRO_G(x) \ 3037#define FW_HDR_FW_VER_MICRO_G(x) \
3019 (((x) >> FW_HDR_FW_VER_MICRO_S) & FW_HDR_FW_VER_MICRO_M) 3038 (((x) >> FW_HDR_FW_VER_MICRO_S) & FW_HDR_FW_VER_MICRO_M)
3020 3039
3021#define FW_HDR_FW_VER_BUILD_S 0 3040#define FW_HDR_FW_VER_BUILD_S 0
3022#define FW_HDR_FW_VER_BUILD_M 0xff 3041#define FW_HDR_FW_VER_BUILD_M 0xff
3042#define FW_HDR_FW_VER_BUILD_V(x) \
3043 ((x) << FW_HDR_FW_VER_BUILD_S)
3023#define FW_HDR_FW_VER_BUILD_G(x) \ 3044#define FW_HDR_FW_VER_BUILD_G(x) \
3024 (((x) >> FW_HDR_FW_VER_BUILD_S) & FW_HDR_FW_VER_BUILD_M) 3045 (((x) >> FW_HDR_FW_VER_BUILD_S) & FW_HDR_FW_VER_BUILD_M)
3025 3046
@@ -3038,4 +3059,84 @@ enum fw_hdr_flags {
3038 FW_HDR_FLAGS_RESET_HALT = 0x00000001, 3059 FW_HDR_FLAGS_RESET_HALT = 0x00000001,
3039}; 3060};
3040 3061
3062/* length of the formatting string */
3063#define FW_DEVLOG_FMT_LEN 192
3064
3065/* maximum number of the formatting string parameters */
3066#define FW_DEVLOG_FMT_PARAMS_NUM 8
3067
3068/* priority levels */
3069enum fw_devlog_level {
3070 FW_DEVLOG_LEVEL_EMERG = 0x0,
3071 FW_DEVLOG_LEVEL_CRIT = 0x1,
3072 FW_DEVLOG_LEVEL_ERR = 0x2,
3073 FW_DEVLOG_LEVEL_NOTICE = 0x3,
3074 FW_DEVLOG_LEVEL_INFO = 0x4,
3075 FW_DEVLOG_LEVEL_DEBUG = 0x5,
3076 FW_DEVLOG_LEVEL_MAX = 0x5,
3077};
3078
3079/* facilities that may send a log message */
3080enum fw_devlog_facility {
3081 FW_DEVLOG_FACILITY_CORE = 0x00,
3082 FW_DEVLOG_FACILITY_CF = 0x01,
3083 FW_DEVLOG_FACILITY_SCHED = 0x02,
3084 FW_DEVLOG_FACILITY_TIMER = 0x04,
3085 FW_DEVLOG_FACILITY_RES = 0x06,
3086 FW_DEVLOG_FACILITY_HW = 0x08,
3087 FW_DEVLOG_FACILITY_FLR = 0x10,
3088 FW_DEVLOG_FACILITY_DMAQ = 0x12,
3089 FW_DEVLOG_FACILITY_PHY = 0x14,
3090 FW_DEVLOG_FACILITY_MAC = 0x16,
3091 FW_DEVLOG_FACILITY_PORT = 0x18,
3092 FW_DEVLOG_FACILITY_VI = 0x1A,
3093 FW_DEVLOG_FACILITY_FILTER = 0x1C,
3094 FW_DEVLOG_FACILITY_ACL = 0x1E,
3095 FW_DEVLOG_FACILITY_TM = 0x20,
3096 FW_DEVLOG_FACILITY_QFC = 0x22,
3097 FW_DEVLOG_FACILITY_DCB = 0x24,
3098 FW_DEVLOG_FACILITY_ETH = 0x26,
3099 FW_DEVLOG_FACILITY_OFLD = 0x28,
3100 FW_DEVLOG_FACILITY_RI = 0x2A,
3101 FW_DEVLOG_FACILITY_ISCSI = 0x2C,
3102 FW_DEVLOG_FACILITY_FCOE = 0x2E,
3103 FW_DEVLOG_FACILITY_FOISCSI = 0x30,
3104 FW_DEVLOG_FACILITY_FOFCOE = 0x32,
3105 FW_DEVLOG_FACILITY_MAX = 0x32,
3106};
3107
3108/* log message format */
3109struct fw_devlog_e {
3110 __be64 timestamp;
3111 __be32 seqno;
3112 __be16 reserved1;
3113 __u8 level;
3114 __u8 facility;
3115 __u8 fmt[FW_DEVLOG_FMT_LEN];
3116 __be32 params[FW_DEVLOG_FMT_PARAMS_NUM];
3117 __be32 reserved3[4];
3118};
3119
3120struct fw_devlog_cmd {
3121 __be32 op_to_write;
3122 __be32 retval_len16;
3123 __u8 level;
3124 __u8 r2[7];
3125 __be32 memtype_devlog_memaddr16_devlog;
3126 __be32 memsize_devlog;
3127 __be32 r3[2];
3128};
3129
3130#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S 28
3131#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M 0xf
3132#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(x) \
3133 (((x) >> FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S) & \
3134 FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M)
3135
3136#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S 0
3137#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M 0xfffffff
3138#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(x) \
3139 (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
3140 FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
3141
3041#endif /* _T4FW_INTERFACE_H_ */ 3142#endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
new file mode 100644
index 000000000000..e2bd3f747858
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -0,0 +1,48 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __T4FW_VERSION_H__
36#define __T4FW_VERSION_H__
37
38#define T4FW_VERSION_MAJOR 0x01
39#define T4FW_VERSION_MINOR 0x0C
40#define T4FW_VERSION_MICRO 0x19
41#define T4FW_VERSION_BUILD 0x00
42
43#define T5FW_VERSION_MAJOR 0x01
44#define T5FW_VERSION_MINOR 0x0C
45#define T5FW_VERSION_MICRO 0x19
46#define T5FW_VERSION_BUILD 0x00
47
48#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index a936ee8958c7..122e2964e63b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -380,9 +380,9 @@ static void qenable(struct sge_rspq *rspq)
380 * enable interrupts. 380 * enable interrupts.
381 */ 381 */
382 t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, 382 t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
383 CIDXINC(0) | 383 CIDXINC_V(0) |
384 SEINTARM(rspq->intr_params) | 384 SEINTARM_V(rspq->intr_params) |
385 INGRESSQID(rspq->cntxt_id)); 385 INGRESSQID_V(rspq->cntxt_id));
386} 386}
387 387
388/* 388/*
@@ -403,9 +403,9 @@ static void enable_rx(struct adapter *adapter)
403 */ 403 */
404 if (adapter->flags & USING_MSI) 404 if (adapter->flags & USING_MSI)
405 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, 405 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
406 CIDXINC(0) | 406 CIDXINC_V(0) |
407 SEINTARM(s->intrq.intr_params) | 407 SEINTARM_V(s->intrq.intr_params) |
408 INGRESSQID(s->intrq.cntxt_id)); 408 INGRESSQID_V(s->intrq.cntxt_id));
409 409
410} 410}
411 411
@@ -450,7 +450,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
450 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. 450 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
451 */ 451 */
452 const struct cpl_sge_egr_update *p = (void *)(rsp + 3); 452 const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
453 opcode = G_CPL_OPCODE(ntohl(p->opcode_qid)); 453 opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
454 if (opcode != CPL_SGE_EGR_UPDATE) { 454 if (opcode != CPL_SGE_EGR_UPDATE) {
455 dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" 455 dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
456 , opcode); 456 , opcode);
@@ -471,7 +471,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
471 * free TX Queue Descriptors ... 471 * free TX Queue Descriptors ...
472 */ 472 */
473 const struct cpl_sge_egr_update *p = cpl; 473 const struct cpl_sge_egr_update *p = cpl;
474 unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid)); 474 unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
475 struct sge *s = &adapter->sge; 475 struct sge *s = &adapter->sge;
476 struct sge_txq *tq; 476 struct sge_txq *tq;
477 struct sge_eth_txq *txq; 477 struct sge_eth_txq *txq;
@@ -1673,7 +1673,7 @@ static void cxgb4vf_get_regs(struct net_device *dev,
1673 reg_block_dump(adapter, regbuf, 1673 reg_block_dump(adapter, regbuf,
1674 T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, 1674 T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
1675 T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip) 1675 T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
1676 ? A_PL_VF_WHOAMI : A_PL_VF_REVISION)); 1676 ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
1677 reg_block_dump(adapter, regbuf, 1677 reg_block_dump(adapter, regbuf,
1678 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, 1678 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
1679 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); 1679 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
@@ -2294,26 +2294,22 @@ static int adap_init0(struct adapter *adapter)
2294 * threshold values from the SGE parameters. 2294 * threshold values from the SGE parameters.
2295 */ 2295 */
2296 s->timer_val[0] = core_ticks_to_us(adapter, 2296 s->timer_val[0] = core_ticks_to_us(adapter,
2297 TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1)); 2297 TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
2298 s->timer_val[1] = core_ticks_to_us(adapter, 2298 s->timer_val[1] = core_ticks_to_us(adapter,
2299 TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1)); 2299 TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
2300 s->timer_val[2] = core_ticks_to_us(adapter, 2300 s->timer_val[2] = core_ticks_to_us(adapter,
2301 TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3)); 2301 TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
2302 s->timer_val[3] = core_ticks_to_us(adapter, 2302 s->timer_val[3] = core_ticks_to_us(adapter,
2303 TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3)); 2303 TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
2304 s->timer_val[4] = core_ticks_to_us(adapter, 2304 s->timer_val[4] = core_ticks_to_us(adapter,
2305 TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5)); 2305 TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
2306 s->timer_val[5] = core_ticks_to_us(adapter, 2306 s->timer_val[5] = core_ticks_to_us(adapter,
2307 TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5)); 2307 TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
2308 2308
2309 s->counter_val[0] = 2309 s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
2310 THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold); 2310 s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
2311 s->counter_val[1] = 2311 s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
2312 THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold); 2312 s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
2313 s->counter_val[2] =
2314 THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
2315 s->counter_val[3] =
2316 THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
2317 2313
2318 /* 2314 /*
2319 * Grab our Virtual Interface resource allocation, extract the 2315 * Grab our Virtual Interface resource allocation, extract the
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f7fd1317d996..0545f0de1c52 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -47,6 +47,7 @@
47#include "t4vf_defs.h" 47#include "t4vf_defs.h"
48 48
49#include "../cxgb4/t4_regs.h" 49#include "../cxgb4/t4_regs.h"
50#include "../cxgb4/t4_values.h"
50#include "../cxgb4/t4fw_api.h" 51#include "../cxgb4/t4fw_api.h"
51#include "../cxgb4/t4_msg.h" 52#include "../cxgb4/t4_msg.h"
52 53
@@ -531,11 +532,11 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
531 */ 532 */
532 if (fl->pend_cred >= FL_PER_EQ_UNIT) { 533 if (fl->pend_cred >= FL_PER_EQ_UNIT) {
533 if (is_t4(adapter->params.chip)) 534 if (is_t4(adapter->params.chip))
534 val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); 535 val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
535 else 536 else
536 val = PIDX_T5(fl->pend_cred / FL_PER_EQ_UNIT) | 537 val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
537 DBTYPE(1); 538 DBTYPE_F;
538 val |= DBPRIO(1); 539 val |= DBPRIO_F;
539 540
540 /* Make sure all memory writes to the Free List queue are 541 /* Make sure all memory writes to the Free List queue are
541 * committed before we tell the hardware about them. 542 * committed before we tell the hardware about them.
@@ -549,9 +550,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
549 if (unlikely(fl->bar2_addr == NULL)) { 550 if (unlikely(fl->bar2_addr == NULL)) {
550 t4_write_reg(adapter, 551 t4_write_reg(adapter,
551 T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, 552 T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
552 QID(fl->cntxt_id) | val); 553 QID_V(fl->cntxt_id) | val);
553 } else { 554 } else {
554 writel(val | QID(fl->bar2_qid), 555 writel(val | QID_V(fl->bar2_qid),
555 fl->bar2_addr + SGE_UDB_KDOORBELL); 556 fl->bar2_addr + SGE_UDB_KDOORBELL);
556 557
557 /* This Write memory Barrier will force the write to 558 /* This Write memory Barrier will force the write to
@@ -925,7 +926,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
925 } 926 }
926 927
927 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | 928 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
928 ULPTX_NSGE(nfrags)); 929 ULPTX_NSGE_V(nfrags));
929 if (likely(--nfrags == 0)) 930 if (likely(--nfrags == 0))
930 return; 931 return;
931 /* 932 /*
@@ -979,12 +980,12 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
979 * doorbell mechanism; otherwise use the new BAR2 mechanism. 980 * doorbell mechanism; otherwise use the new BAR2 mechanism.
980 */ 981 */
981 if (unlikely(tq->bar2_addr == NULL)) { 982 if (unlikely(tq->bar2_addr == NULL)) {
982 u32 val = PIDX(n); 983 u32 val = PIDX_V(n);
983 984
984 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, 985 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
985 QID(tq->cntxt_id) | val); 986 QID_V(tq->cntxt_id) | val);
986 } else { 987 } else {
987 u32 val = PIDX_T5(n); 988 u32 val = PIDX_T5_V(n);
988 989
989 /* T4 and later chips share the same PIDX field offset within 990 /* T4 and later chips share the same PIDX field offset within
990 * the doorbell, but T5 and later shrank the field in order to 991 * the doorbell, but T5 and later shrank the field in order to
@@ -992,7 +993,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
992 * large in the first place (14 bits) so we just use the T5 993 * large in the first place (14 bits) so we just use the T5
993 * and later limits and warn if a Queue ID is too large. 994 * and later limits and warn if a Queue ID is too large.
994 */ 995 */
995 WARN_ON(val & DBPRIO(1)); 996 WARN_ON(val & DBPRIO_F);
996 997
997 /* If we're only writing a single Egress Unit and the BAR2 998 /* If we're only writing a single Egress Unit and the BAR2
998 * Queue ID is 0, we can use the Write Combining Doorbell 999 * Queue ID is 0, we can use the Write Combining Doorbell
@@ -1023,7 +1024,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
1023 count--; 1024 count--;
1024 } 1025 }
1025 } else 1026 } else
1026 writel(val | QID(tq->bar2_qid), 1027 writel(val | QID_V(tq->bar2_qid),
1027 tq->bar2_addr + SGE_UDB_KDOORBELL); 1028 tq->bar2_addr + SGE_UDB_KDOORBELL);
1028 1029
1029 /* This Write Memory Barrier will force the write to the User 1030 /* This Write Memory Barrier will force the write to the User
@@ -1325,9 +1326,9 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1325 * If there's a VLAN tag present, add that to the list of things to 1326 * If there's a VLAN tag present, add that to the list of things to
1326 * do in this Work Request. 1327 * do in this Work Request.
1327 */ 1328 */
1328 if (vlan_tx_tag_present(skb)) { 1329 if (skb_vlan_tag_present(skb)) {
1329 txq->vlan_ins++; 1330 txq->vlan_ins++;
1330 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); 1331 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
1331 } 1332 }
1332 1333
1333 /* 1334 /*
@@ -1603,7 +1604,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1603 * If this is a good TCP packet and we have Generic Receive Offload 1604 * If this is a good TCP packet and we have Generic Receive Offload
1604 * enabled, handle the packet in the GRO path. 1605 * enabled, handle the packet in the GRO path.
1605 */ 1606 */
1606 if ((pkt->l2info & cpu_to_be32(RXF_TCP)) && 1607 if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
1607 (rspq->netdev->features & NETIF_F_GRO) && csum_ok && 1608 (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
1608 !pkt->ip_frag) { 1609 !pkt->ip_frag) {
1609 do_gro(rxq, gl, pkt); 1610 do_gro(rxq, gl, pkt);
@@ -1625,7 +1626,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1625 rxq->stats.pkts++; 1626 rxq->stats.pkts++;
1626 1627
1627 if (csum_ok && !pkt->err_vec && 1628 if (csum_ok && !pkt->err_vec &&
1628 (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) { 1629 (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
1629 if (!pkt->ip_frag) 1630 if (!pkt->ip_frag)
1630 skb->ip_summed = CHECKSUM_UNNECESSARY; 1631 skb->ip_summed = CHECKSUM_UNNECESSARY;
1631 else { 1632 else {
@@ -1875,13 +1876,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
1875 if (unlikely(work_done == 0)) 1876 if (unlikely(work_done == 0))
1876 rspq->unhandled_irqs++; 1877 rspq->unhandled_irqs++;
1877 1878
1878 val = CIDXINC(work_done) | SEINTARM(intr_params); 1879 val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
1879 if (is_t4(rspq->adapter->params.chip)) { 1880 if (is_t4(rspq->adapter->params.chip)) {
1880 t4_write_reg(rspq->adapter, 1881 t4_write_reg(rspq->adapter,
1881 T4VF_SGE_BASE_ADDR + SGE_VF_GTS, 1882 T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1882 val | INGRESSQID((u32)rspq->cntxt_id)); 1883 val | INGRESSQID_V((u32)rspq->cntxt_id));
1883 } else { 1884 } else {
1884 writel(val | INGRESSQID(rspq->bar2_qid), 1885 writel(val | INGRESSQID_V(rspq->bar2_qid),
1885 rspq->bar2_addr + SGE_UDB_GTS); 1886 rspq->bar2_addr + SGE_UDB_GTS);
1886 wmb(); 1887 wmb();
1887 } 1888 }
@@ -1975,12 +1976,12 @@ static unsigned int process_intrq(struct adapter *adapter)
1975 rspq_next(intrq); 1976 rspq_next(intrq);
1976 } 1977 }
1977 1978
1978 val = CIDXINC(work_done) | SEINTARM(intrq->intr_params); 1979 val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
1979 if (is_t4(adapter->params.chip)) 1980 if (is_t4(adapter->params.chip))
1980 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, 1981 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1981 val | INGRESSQID(intrq->cntxt_id)); 1982 val | INGRESSQID_V(intrq->cntxt_id));
1982 else { 1983 else {
1983 writel(val | INGRESSQID(intrq->bar2_qid), 1984 writel(val | INGRESSQID_V(intrq->bar2_qid),
1984 intrq->bar2_addr + SGE_UDB_GTS); 1985 intrq->bar2_addr + SGE_UDB_GTS);
1985 wmb(); 1986 wmb();
1986 } 1987 }
@@ -2583,7 +2584,7 @@ int t4vf_sge_init(struct adapter *adapter)
2583 fl0, fl1); 2584 fl0, fl1);
2584 return -EINVAL; 2585 return -EINVAL;
2585 } 2586 }
2586 if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) { 2587 if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
2587 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); 2588 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2588 return -EINVAL; 2589 return -EINVAL;
2589 } 2590 }
@@ -2593,9 +2594,9 @@ int t4vf_sge_init(struct adapter *adapter)
2593 */ 2594 */
2594 if (fl1) 2595 if (fl1)
2595 s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; 2596 s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
2596 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) 2597 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
2597 ? 128 : 64); 2598 ? 128 : 64);
2598 s->pktshift = PKTSHIFT_GET(sge_params->sge_control); 2599 s->pktshift = PKTSHIFT_G(sge_params->sge_control);
2599 2600
2600 /* T4 uses a single control field to specify both the PCIe Padding and 2601 /* T4 uses a single control field to specify both the PCIe Padding and
2601 * Packing Boundary. T5 introduced the ability to specify these 2602 * Packing Boundary. T5 introduced the ability to specify these
@@ -2607,8 +2608,8 @@ int t4vf_sge_init(struct adapter *adapter)
2607 * end doing this because it would initialize the Padding Boundary and 2608 * end doing this because it would initialize the Padding Boundary and
2608 * leave the Packing Boundary initialized to 0 (16 bytes).) 2609 * leave the Packing Boundary initialized to 0 (16 bytes).)
2609 */ 2610 */
2610 ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + 2611 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
2611 X_INGPADBOUNDARY_SHIFT); 2612 INGPADBOUNDARY_SHIFT_X);
2612 if (is_t4(adapter->params.chip)) { 2613 if (is_t4(adapter->params.chip)) {
2613 s->fl_align = ingpadboundary; 2614 s->fl_align = ingpadboundary;
2614 } else { 2615 } else {
@@ -2633,7 +2634,7 @@ int t4vf_sge_init(struct adapter *adapter)
2633 * Congestion Threshold is in units of 2 Free List pointers.) 2634 * Congestion Threshold is in units of 2 Free List pointers.)
2634 */ 2635 */
2635 s->fl_starve_thres 2636 s->fl_starve_thres
2636 = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1; 2637 = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
2637 2638
2638 /* 2639 /*
2639 * Set up tasklet timers. 2640 * Set up tasklet timers.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
index c7b127d93767..b516b12b1884 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h
@@ -64,8 +64,8 @@
64 * Mailbox Data in the fixed CIM PF map and the programmable VF map must 64 * Mailbox Data in the fixed CIM PF map and the programmable VF map must
65 * match. However, it's a useful convention ... 65 * match. However, it's a useful convention ...
66 */ 66 */
67#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA 67#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA_A
68#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA! 68#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA_A!
69#endif 69#endif
70 70
71/* 71/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 60426cf890a7..1b5506df35b1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -39,6 +39,7 @@
39#include "t4vf_defs.h" 39#include "t4vf_defs.h"
40 40
41#include "../cxgb4/t4_regs.h" 41#include "../cxgb4/t4_regs.h"
42#include "../cxgb4/t4_values.h"
42#include "../cxgb4/t4fw_api.h" 43#include "../cxgb4/t4fw_api.h"
43 44
44/* 45/*
@@ -137,9 +138,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
137 * Loop trying to get ownership of the mailbox. Return an error 138 * Loop trying to get ownership of the mailbox. Return an error
138 * if we can't gain ownership. 139 * if we can't gain ownership.
139 */ 140 */
140 v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl)); 141 v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
141 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) 142 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
142 v = MBOWNER_GET(t4_read_reg(adapter, mbox_ctl)); 143 v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
143 if (v != MBOX_OWNER_DRV) 144 if (v != MBOX_OWNER_DRV)
144 return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT; 145 return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
145 146
@@ -161,7 +162,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
161 t4_read_reg(adapter, mbox_data); /* flush write */ 162 t4_read_reg(adapter, mbox_data); /* flush write */
162 163
163 t4_write_reg(adapter, mbox_ctl, 164 t4_write_reg(adapter, mbox_ctl,
164 MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); 165 MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
165 t4_read_reg(adapter, mbox_ctl); /* flush write */ 166 t4_read_reg(adapter, mbox_ctl); /* flush write */
166 167
167 /* 168 /*
@@ -183,14 +184,14 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
183 * If we're the owner, see if this is the reply we wanted. 184 * If we're the owner, see if this is the reply we wanted.
184 */ 185 */
185 v = t4_read_reg(adapter, mbox_ctl); 186 v = t4_read_reg(adapter, mbox_ctl);
186 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) { 187 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
187 /* 188 /*
188 * If the Message Valid bit isn't on, revoke ownership 189 * If the Message Valid bit isn't on, revoke ownership
189 * of the mailbox and continue waiting for our reply. 190 * of the mailbox and continue waiting for our reply.
190 */ 191 */
191 if ((v & MBMSGVALID) == 0) { 192 if ((v & MBMSGVALID_F) == 0) {
192 t4_write_reg(adapter, mbox_ctl, 193 t4_write_reg(adapter, mbox_ctl,
193 MBOWNER(MBOX_OWNER_NONE)); 194 MBOWNER_V(MBOX_OWNER_NONE));
194 continue; 195 continue;
195 } 196 }
196 197
@@ -216,7 +217,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
216 & FW_CMD_REQUEST_F) != 0); 217 & FW_CMD_REQUEST_F) != 0);
217 } 218 }
218 t4_write_reg(adapter, mbox_ctl, 219 t4_write_reg(adapter, mbox_ctl,
219 MBOWNER(MBOX_OWNER_NONE)); 220 MBOWNER_V(MBOX_OWNER_NONE));
220 return -FW_CMD_RETVAL_G(v); 221 return -FW_CMD_RETVAL_G(v);
221 } 222 }
222 } 223 }
@@ -530,19 +531,19 @@ int t4vf_get_sge_params(struct adapter *adapter)
530 int v; 531 int v;
531 532
532 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 533 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
533 FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL)); 534 FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
534 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 535 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
535 FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE)); 536 FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
536 params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 537 params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
537 FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0)); 538 FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
538 params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 539 params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
539 FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1)); 540 FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
540 params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 541 params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
541 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1)); 542 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
542 params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 543 params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
543 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3)); 544 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
544 params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 545 params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
545 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5)); 546 FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
546 v = t4vf_query_params(adapter, 7, params, vals); 547 v = t4vf_query_params(adapter, 7, params, vals);
547 if (v) 548 if (v)
548 return v; 549 return v;
@@ -578,9 +579,9 @@ int t4vf_get_sge_params(struct adapter *adapter)
578 } 579 }
579 580
580 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 581 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
581 FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD)); 582 FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
582 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | 583 params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
583 FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL)); 584 FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
584 v = t4vf_query_params(adapter, 2, params, vals); 585 v = t4vf_query_params(adapter, 2, params, vals);
585 if (v) 586 if (v)
586 return v; 587 return v;
@@ -617,8 +618,8 @@ int t4vf_get_sge_params(struct adapter *adapter)
617 * the driver can just use it. 618 * the driver can just use it.
618 */ 619 */
619 whoami = t4_read_reg(adapter, 620 whoami = t4_read_reg(adapter,
620 T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI); 621 T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
621 pf = SOURCEPF_GET(whoami); 622 pf = SOURCEPF_G(whoami);
622 623
623 s_hps = (HOSTPAGESIZEPF0_S + 624 s_hps = (HOSTPAGESIZEPF0_S +
624 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf); 625 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
@@ -630,10 +631,10 @@ int t4vf_get_sge_params(struct adapter *adapter)
630 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf); 631 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
631 sge_params->sge_vf_eq_qpp = 632 sge_params->sge_vf_eq_qpp =
632 ((sge_params->sge_egress_queues_per_page >> s_qpp) 633 ((sge_params->sge_egress_queues_per_page >> s_qpp)
633 & QUEUESPERPAGEPF0_MASK); 634 & QUEUESPERPAGEPF0_M);
634 sge_params->sge_vf_iq_qpp = 635 sge_params->sge_vf_iq_qpp =
635 ((sge_params->sge_ingress_queues_per_page >> s_qpp) 636 ((sge_params->sge_ingress_queues_per_page >> s_qpp)
636 & QUEUESPERPAGEPF0_MASK); 637 & QUEUESPERPAGEPF0_M);
637 } 638 }
638 639
639 return 0; 640 return 0;
@@ -1592,7 +1593,7 @@ int t4vf_prep_adapter(struct adapter *adapter)
1592 break; 1593 break;
1593 1594
1594 case CHELSIO_T5: 1595 case CHELSIO_T5:
1595 chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); 1596 chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
1596 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); 1597 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
1597 break; 1598 break;
1598 } 1599 }
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 3a12c096ea1c..de9f7c97d916 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -475,8 +475,7 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
475 if (d) 475 if (d)
476 dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE); 476 dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
477 477
478 if (ep->rx_buf[i] != NULL) 478 kfree(ep->rx_buf[i]);
479 kfree(ep->rx_buf[i]);
480 } 479 }
481 480
482 for (i = 0; i < TX_QUEUE_ENTRIES; i++) { 481 for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
@@ -486,8 +485,7 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
486 if (d) 485 if (d)
487 dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE); 486 dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
488 487
489 if (ep->tx_buf[i] != NULL) 488 kfree(ep->tx_buf[i]);
490 kfree(ep->tx_buf[i]);
491 } 489 }
492 490
493 dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs, 491 dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 25c4d88853d8..84b6a2b46aec 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
33 33
34#define DRV_NAME "enic" 34#define DRV_NAME "enic"
35#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 35#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
36#define DRV_VERSION "2.1.1.67" 36#define DRV_VERSION "2.1.1.83"
37#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" 37#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
38 38
39#define ENIC_BARS_MAX 6 39#define ENIC_BARS_MAX 6
@@ -188,6 +188,7 @@ struct enic {
188 struct enic_rfs_flw_tbl rfs_h; 188 struct enic_rfs_flw_tbl rfs_h;
189 u32 rx_copybreak; 189 u32 rx_copybreak;
190 u8 rss_key[ENIC_RSS_LEN]; 190 u8 rss_key[ENIC_RSS_LEN];
191 struct vnic_gen_stats gen_stats;
191}; 192};
192 193
193static inline struct device *enic_get_dev(struct enic *enic) 194static inline struct device *enic_get_dev(struct enic *enic)
@@ -242,6 +243,19 @@ static inline unsigned int enic_msix_notify_intr(struct enic *enic)
242 return enic->rq_count + enic->wq_count + 1; 243 return enic->rq_count + enic->wq_count + 1;
243} 244}
244 245
246static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
247{
248 if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
249 net_warn_ratelimited("%s: PCI dma mapping failed!\n",
250 enic->netdev->name);
251 enic->gen_stats.dma_map_error++;
252
253 return -ENOMEM;
254 }
255
256 return 0;
257}
258
245void enic_reset_addr_lists(struct enic *enic); 259void enic_reset_addr_lists(struct enic *enic);
246int enic_sriov_enabled(struct enic *enic); 260int enic_sriov_enabled(struct enic *enic);
247int enic_is_valid_vf(struct enic *enic, int vf); 261int enic_is_valid_vf(struct enic *enic, int vf);
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index 87ddc44b590e..f8d2a6a34282 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -177,40 +177,6 @@ int enic_dev_intr_coal_timer_info(struct enic *enic)
177 return err; 177 return err;
178} 178}
179 179
180int enic_vnic_dev_deinit(struct enic *enic)
181{
182 int err;
183
184 spin_lock_bh(&enic->devcmd_lock);
185 err = vnic_dev_deinit(enic->vdev);
186 spin_unlock_bh(&enic->devcmd_lock);
187
188 return err;
189}
190
191int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp)
192{
193 int err;
194
195 spin_lock_bh(&enic->devcmd_lock);
196 err = vnic_dev_init_prov2(enic->vdev,
197 (u8 *)vp, vic_provinfo_size(vp));
198 spin_unlock_bh(&enic->devcmd_lock);
199
200 return err;
201}
202
203int enic_dev_deinit_done(struct enic *enic, int *status)
204{
205 int err;
206
207 spin_lock_bh(&enic->devcmd_lock);
208 err = vnic_dev_deinit_done(enic->vdev, status);
209 spin_unlock_bh(&enic->devcmd_lock);
210
211 return err;
212}
213
214/* rtnl lock is held */ 180/* rtnl lock is held */
215int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) 181int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
216{ 182{
@@ -237,28 +203,6 @@ int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
237 return err; 203 return err;
238} 204}
239 205
240int enic_dev_enable2(struct enic *enic, int active)
241{
242 int err;
243
244 spin_lock_bh(&enic->devcmd_lock);
245 err = vnic_dev_enable2(enic->vdev, active);
246 spin_unlock_bh(&enic->devcmd_lock);
247
248 return err;
249}
250
251int enic_dev_enable2_done(struct enic *enic, int *status)
252{
253 int err;
254
255 spin_lock_bh(&enic->devcmd_lock);
256 err = vnic_dev_enable2_done(enic->vdev, status);
257 spin_unlock_bh(&enic->devcmd_lock);
258
259 return err;
260}
261
262int enic_dev_status_to_errno(int devcmd_status) 206int enic_dev_status_to_errno(int devcmd_status)
263{ 207{
264 switch (devcmd_status) { 208 switch (devcmd_status) {
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 10bb970b2f35..f5bb058b3f96 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -55,11 +55,6 @@ int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
55int enic_dev_enable(struct enic *enic); 55int enic_dev_enable(struct enic *enic);
56int enic_dev_disable(struct enic *enic); 56int enic_dev_disable(struct enic *enic);
57int enic_dev_intr_coal_timer_info(struct enic *enic); 57int enic_dev_intr_coal_timer_info(struct enic *enic);
58int enic_vnic_dev_deinit(struct enic *enic);
59int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp);
60int enic_dev_deinit_done(struct enic *enic, int *status);
61int enic_dev_enable2(struct enic *enic, int arg);
62int enic_dev_enable2_done(struct enic *enic, int *status);
63int enic_dev_status_to_errno(int devcmd_status); 58int enic_dev_status_to_errno(int devcmd_status);
64 59
65#endif /* _ENIC_DEV_H_ */ 60#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index eba1eb846d34..28d9ca675a27 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -24,6 +24,7 @@
24#include "enic_dev.h" 24#include "enic_dev.h"
25#include "enic_clsf.h" 25#include "enic_clsf.h"
26#include "vnic_rss.h" 26#include "vnic_rss.h"
27#include "vnic_stats.h"
27 28
28struct enic_stat { 29struct enic_stat {
29 char name[ETH_GSTRING_LEN]; 30 char name[ETH_GSTRING_LEN];
@@ -40,6 +41,11 @@ struct enic_stat {
40 .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \ 41 .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
41} 42}
42 43
44#define ENIC_GEN_STAT(stat) { \
45 .name = #stat, \
46 .index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
47}
48
43static const struct enic_stat enic_tx_stats[] = { 49static const struct enic_stat enic_tx_stats[] = {
44 ENIC_TX_STAT(tx_frames_ok), 50 ENIC_TX_STAT(tx_frames_ok),
45 ENIC_TX_STAT(tx_unicast_frames_ok), 51 ENIC_TX_STAT(tx_unicast_frames_ok),
@@ -78,10 +84,15 @@ static const struct enic_stat enic_rx_stats[] = {
78 ENIC_RX_STAT(rx_frames_to_max), 84 ENIC_RX_STAT(rx_frames_to_max),
79}; 85};
80 86
87static const struct enic_stat enic_gen_stats[] = {
88 ENIC_GEN_STAT(dma_map_error),
89};
90
81static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); 91static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
82static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); 92static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
93static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
83 94
84void enic_intr_coal_set_rx(struct enic *enic, u32 timer) 95static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
85{ 96{
86 int i; 97 int i;
87 int intr; 98 int intr;
@@ -146,6 +157,10 @@ static void enic_get_strings(struct net_device *netdev, u32 stringset,
146 memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN); 157 memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
147 data += ETH_GSTRING_LEN; 158 data += ETH_GSTRING_LEN;
148 } 159 }
160 for (i = 0; i < enic_n_gen_stats; i++) {
161 memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
162 data += ETH_GSTRING_LEN;
163 }
149 break; 164 break;
150 } 165 }
151} 166}
@@ -154,7 +169,7 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
154{ 169{
155 switch (sset) { 170 switch (sset) {
156 case ETH_SS_STATS: 171 case ETH_SS_STATS:
157 return enic_n_tx_stats + enic_n_rx_stats; 172 return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
158 default: 173 default:
159 return -EOPNOTSUPP; 174 return -EOPNOTSUPP;
160 } 175 }
@@ -173,6 +188,8 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
173 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; 188 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
174 for (i = 0; i < enic_n_rx_stats; i++) 189 for (i = 0; i < enic_n_rx_stats; i++)
175 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index]; 190 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
191 for (i = 0; i < enic_n_gen_stats; i++)
192 *(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
176} 193}
177 194
178static u32 enic_get_msglevel(struct net_device *netdev) 195static u32 enic_get_msglevel(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index e356afa44e7d..9cbe038a388e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -45,6 +45,7 @@
45#ifdef CONFIG_NET_RX_BUSY_POLL 45#ifdef CONFIG_NET_RX_BUSY_POLL
46#include <net/busy_poll.h> 46#include <net/busy_poll.h>
47#endif 47#endif
48#include <linux/crash_dump.h>
48 49
49#include "cq_enet_desc.h" 50#include "cq_enet_desc.h"
50#include "vnic_dev.h" 51#include "vnic_dev.h"
@@ -88,7 +89,7 @@ MODULE_DEVICE_TABLE(pci, enic_id_table);
88 * coalescing timer values 89 * coalescing timer values
89 * {rx_rate in Mbps, mapping percentage of the range} 90 * {rx_rate in Mbps, mapping percentage of the range}
90 */ 91 */
91struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = { 92static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
92 {4000, 0}, 93 {4000, 0},
93 {4400, 10}, 94 {4400, 10},
94 {5060, 20}, 95 {5060, 20},
@@ -105,7 +106,7 @@ struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
105/* This table helps the driver to pick different ranges for rx coalescing 106/* This table helps the driver to pick different ranges for rx coalescing
106 * timer depending on the link speed. 107 * timer depending on the link speed.
107 */ 108 */
108struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = { 109static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
109 {0, 0}, /* 0 - 4 Gbps */ 110 {0, 0}, /* 0 - 4 Gbps */
110 {0, 3}, /* 4 - 10 Gbps */ 111 {0, 3}, /* 4 - 10 Gbps */
111 {3, 6}, /* 10 - 40 Gbps */ 112 {3, 6}, /* 10 - 40 Gbps */
@@ -351,80 +352,94 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
351 return IRQ_HANDLED; 352 return IRQ_HANDLED;
352} 353}
353 354
354static inline void enic_queue_wq_skb_cont(struct enic *enic, 355static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
355 struct vnic_wq *wq, struct sk_buff *skb, 356 struct sk_buff *skb, unsigned int len_left,
356 unsigned int len_left, int loopback) 357 int loopback)
357{ 358{
358 const skb_frag_t *frag; 359 const skb_frag_t *frag;
360 dma_addr_t dma_addr;
359 361
360 /* Queue additional data fragments */ 362 /* Queue additional data fragments */
361 for (frag = skb_shinfo(skb)->frags; len_left; frag++) { 363 for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
362 len_left -= skb_frag_size(frag); 364 len_left -= skb_frag_size(frag);
363 enic_queue_wq_desc_cont(wq, skb, 365 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
364 skb_frag_dma_map(&enic->pdev->dev, 366 skb_frag_size(frag),
365 frag, 0, skb_frag_size(frag), 367 DMA_TO_DEVICE);
366 DMA_TO_DEVICE), 368 if (unlikely(enic_dma_map_check(enic, dma_addr)))
367 skb_frag_size(frag), 369 return -ENOMEM;
368 (len_left == 0), /* EOP? */ 370 enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
369 loopback); 371 (len_left == 0), /* EOP? */
372 loopback);
370 } 373 }
374
375 return 0;
371} 376}
372 377
373static inline void enic_queue_wq_skb_vlan(struct enic *enic, 378static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
374 struct vnic_wq *wq, struct sk_buff *skb, 379 struct sk_buff *skb, int vlan_tag_insert,
375 int vlan_tag_insert, unsigned int vlan_tag, int loopback) 380 unsigned int vlan_tag, int loopback)
376{ 381{
377 unsigned int head_len = skb_headlen(skb); 382 unsigned int head_len = skb_headlen(skb);
378 unsigned int len_left = skb->len - head_len; 383 unsigned int len_left = skb->len - head_len;
379 int eop = (len_left == 0); 384 int eop = (len_left == 0);
385 dma_addr_t dma_addr;
386 int err = 0;
387
388 dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
389 PCI_DMA_TODEVICE);
390 if (unlikely(enic_dma_map_check(enic, dma_addr)))
391 return -ENOMEM;
380 392
381 /* Queue the main skb fragment. The fragments are no larger 393 /* Queue the main skb fragment. The fragments are no larger
382 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less 394 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
383 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor 395 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
384 * per fragment is queued. 396 * per fragment is queued.
385 */ 397 */
386 enic_queue_wq_desc(wq, skb, 398 enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
387 pci_map_single(enic->pdev, skb->data, 399 vlan_tag, eop, loopback);
388 head_len, PCI_DMA_TODEVICE),
389 head_len,
390 vlan_tag_insert, vlan_tag,
391 eop, loopback);
392 400
393 if (!eop) 401 if (!eop)
394 enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); 402 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
403
404 return err;
395} 405}
396 406
397static inline void enic_queue_wq_skb_csum_l4(struct enic *enic, 407static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
398 struct vnic_wq *wq, struct sk_buff *skb, 408 struct sk_buff *skb, int vlan_tag_insert,
399 int vlan_tag_insert, unsigned int vlan_tag, int loopback) 409 unsigned int vlan_tag, int loopback)
400{ 410{
401 unsigned int head_len = skb_headlen(skb); 411 unsigned int head_len = skb_headlen(skb);
402 unsigned int len_left = skb->len - head_len; 412 unsigned int len_left = skb->len - head_len;
403 unsigned int hdr_len = skb_checksum_start_offset(skb); 413 unsigned int hdr_len = skb_checksum_start_offset(skb);
404 unsigned int csum_offset = hdr_len + skb->csum_offset; 414 unsigned int csum_offset = hdr_len + skb->csum_offset;
405 int eop = (len_left == 0); 415 int eop = (len_left == 0);
416 dma_addr_t dma_addr;
417 int err = 0;
418
419 dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
420 PCI_DMA_TODEVICE);
421 if (unlikely(enic_dma_map_check(enic, dma_addr)))
422 return -ENOMEM;
406 423
407 /* Queue the main skb fragment. The fragments are no larger 424 /* Queue the main skb fragment. The fragments are no larger
408 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less 425 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
409 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor 426 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
410 * per fragment is queued. 427 * per fragment is queued.
411 */ 428 */
412 enic_queue_wq_desc_csum_l4(wq, skb, 429 enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
413 pci_map_single(enic->pdev, skb->data, 430 hdr_len, vlan_tag_insert, vlan_tag, eop,
414 head_len, PCI_DMA_TODEVICE), 431 loopback);
415 head_len,
416 csum_offset,
417 hdr_len,
418 vlan_tag_insert, vlan_tag,
419 eop, loopback);
420 432
421 if (!eop) 433 if (!eop)
422 enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); 434 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
435
436 return err;
423} 437}
424 438
425static inline void enic_queue_wq_skb_tso(struct enic *enic, 439static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
426 struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss, 440 struct sk_buff *skb, unsigned int mss,
427 int vlan_tag_insert, unsigned int vlan_tag, int loopback) 441 int vlan_tag_insert, unsigned int vlan_tag,
442 int loopback)
428{ 443{
429 unsigned int frag_len_left = skb_headlen(skb); 444 unsigned int frag_len_left = skb_headlen(skb);
430 unsigned int len_left = skb->len - frag_len_left; 445 unsigned int len_left = skb->len - frag_len_left;
@@ -454,20 +469,19 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
454 */ 469 */
455 while (frag_len_left) { 470 while (frag_len_left) {
456 len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN); 471 len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
457 dma_addr = pci_map_single(enic->pdev, skb->data + offset, 472 dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
458 len, PCI_DMA_TODEVICE); 473 PCI_DMA_TODEVICE);
459 enic_queue_wq_desc_tso(wq, skb, 474 if (unlikely(enic_dma_map_check(enic, dma_addr)))
460 dma_addr, 475 return -ENOMEM;
461 len, 476 enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
462 mss, hdr_len, 477 vlan_tag_insert, vlan_tag,
463 vlan_tag_insert, vlan_tag, 478 eop && (len == frag_len_left), loopback);
464 eop && (len == frag_len_left), loopback);
465 frag_len_left -= len; 479 frag_len_left -= len;
466 offset += len; 480 offset += len;
467 } 481 }
468 482
469 if (eop) 483 if (eop)
470 return; 484 return 0;
471 485
472 /* Queue WQ_ENET_MAX_DESC_LEN length descriptors 486 /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
473 * for additional data fragments 487 * for additional data fragments
@@ -483,16 +497,18 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
483 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 497 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
484 offset, len, 498 offset, len,
485 DMA_TO_DEVICE); 499 DMA_TO_DEVICE);
486 enic_queue_wq_desc_cont(wq, skb, 500 if (unlikely(enic_dma_map_check(enic, dma_addr)))
487 dma_addr, 501 return -ENOMEM;
488 len, 502 enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
489 (len_left == 0) && 503 (len_left == 0) &&
490 (len == frag_len_left), /* EOP? */ 504 (len == frag_len_left),/*EOP*/
491 loopback); 505 loopback);
492 frag_len_left -= len; 506 frag_len_left -= len;
493 offset += len; 507 offset += len;
494 } 508 }
495 } 509 }
510
511 return 0;
496} 512}
497 513
498static inline void enic_queue_wq_skb(struct enic *enic, 514static inline void enic_queue_wq_skb(struct enic *enic,
@@ -502,25 +518,42 @@ static inline void enic_queue_wq_skb(struct enic *enic,
502 unsigned int vlan_tag = 0; 518 unsigned int vlan_tag = 0;
503 int vlan_tag_insert = 0; 519 int vlan_tag_insert = 0;
504 int loopback = 0; 520 int loopback = 0;
521 int err;
505 522
506 if (vlan_tx_tag_present(skb)) { 523 if (skb_vlan_tag_present(skb)) {
507 /* VLAN tag from trunking driver */ 524 /* VLAN tag from trunking driver */
508 vlan_tag_insert = 1; 525 vlan_tag_insert = 1;
509 vlan_tag = vlan_tx_tag_get(skb); 526 vlan_tag = skb_vlan_tag_get(skb);
510 } else if (enic->loop_enable) { 527 } else if (enic->loop_enable) {
511 vlan_tag = enic->loop_tag; 528 vlan_tag = enic->loop_tag;
512 loopback = 1; 529 loopback = 1;
513 } 530 }
514 531
515 if (mss) 532 if (mss)
516 enic_queue_wq_skb_tso(enic, wq, skb, mss, 533 err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
517 vlan_tag_insert, vlan_tag, loopback); 534 vlan_tag_insert, vlan_tag,
535 loopback);
518 else if (skb->ip_summed == CHECKSUM_PARTIAL) 536 else if (skb->ip_summed == CHECKSUM_PARTIAL)
519 enic_queue_wq_skb_csum_l4(enic, wq, skb, 537 err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
520 vlan_tag_insert, vlan_tag, loopback); 538 vlan_tag, loopback);
521 else 539 else
522 enic_queue_wq_skb_vlan(enic, wq, skb, 540 err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
523 vlan_tag_insert, vlan_tag, loopback); 541 vlan_tag, loopback);
542 if (unlikely(err)) {
543 struct vnic_wq_buf *buf;
544
545 buf = wq->to_use->prev;
546 /* while not EOP of previous pkt && queue not empty.
547 * For all non EOP bufs, os_buf is NULL.
548 */
549 while (!buf->os_buf && (buf->next != wq->to_clean)) {
550 enic_free_wq_buf(wq, buf);
551 wq->ring.desc_avail++;
552 buf = buf->prev;
553 }
554 wq->to_use = buf->next;
555 dev_kfree_skb(skb);
556 }
524} 557}
525 558
526/* netif_tx_lock held, process context with BHs disabled, or BH */ 559/* netif_tx_lock held, process context with BHs disabled, or BH */
@@ -950,8 +983,12 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
950 if (!skb) 983 if (!skb)
951 return -ENOMEM; 984 return -ENOMEM;
952 985
953 dma_addr = pci_map_single(enic->pdev, skb->data, 986 dma_addr = pci_map_single(enic->pdev, skb->data, len,
954 len, PCI_DMA_FROMDEVICE); 987 PCI_DMA_FROMDEVICE);
988 if (unlikely(enic_dma_map_check(enic, dma_addr))) {
989 dev_kfree_skb(skb);
990 return -ENOMEM;
991 }
955 992
956 enic_queue_rq_desc(rq, skb, os_buf_index, 993 enic_queue_rq_desc(rq, skb, os_buf_index,
957 dma_addr, len); 994 dma_addr, len);
@@ -1266,7 +1303,7 @@ static void enic_set_rx_cpu_rmap(struct enic *enic)
1266#endif /* CONFIG_RFS_ACCEL */ 1303#endif /* CONFIG_RFS_ACCEL */
1267 1304
1268#ifdef CONFIG_NET_RX_BUSY_POLL 1305#ifdef CONFIG_NET_RX_BUSY_POLL
1269int enic_busy_poll(struct napi_struct *napi) 1306static int enic_busy_poll(struct napi_struct *napi)
1270{ 1307{
1271 struct net_device *netdev = napi->dev; 1308 struct net_device *netdev = napi->dev;
1272 struct enic *enic = netdev_priv(netdev); 1309 struct enic *enic = netdev_priv(netdev);
@@ -2231,6 +2268,18 @@ static void enic_dev_deinit(struct enic *enic)
2231 enic_clear_intr_mode(enic); 2268 enic_clear_intr_mode(enic);
2232} 2269}
2233 2270
2271static void enic_kdump_kernel_config(struct enic *enic)
2272{
2273 if (is_kdump_kernel()) {
2274 dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
2275 enic->rq_count = 1;
2276 enic->wq_count = 1;
2277 enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
2278 enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
2279 enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
2280 }
2281}
2282
2234static int enic_dev_init(struct enic *enic) 2283static int enic_dev_init(struct enic *enic)
2235{ 2284{
2236 struct device *dev = enic_get_dev(enic); 2285 struct device *dev = enic_get_dev(enic);
@@ -2260,6 +2309,10 @@ static int enic_dev_init(struct enic *enic)
2260 2309
2261 enic_get_res_counts(enic); 2310 enic_get_res_counts(enic);
2262 2311
2312 /* modify resource count if we are in kdump_kernel
2313 */
2314 enic_kdump_kernel_config(enic);
2315
2263 /* Set interrupt mode based on resource counts and system 2316 /* Set interrupt mode based on resource counts and system
2264 * capabilities 2317 * capabilities
2265 */ 2318 */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_stats.h b/drivers/net/ethernet/cisco/enic/vnic_stats.h
index 77750ec93954..74c81ed6fdab 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_stats.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_stats.h
@@ -62,6 +62,11 @@ struct vnic_rx_stats {
62 u64 rsvd[16]; 62 u64 rsvd[16];
63}; 63};
64 64
65/* Generic statistics */
66struct vnic_gen_stats {
67 u64 dma_map_error;
68};
69
65struct vnic_stats { 70struct vnic_stats {
66 struct vnic_tx_stats tx; 71 struct vnic_tx_stats tx;
67 struct vnic_rx_stats rx; 72 struct vnic_rx_stats rx;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index 3e6b8d54dafc..b5a1c937fad2 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -47,11 +47,14 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
47 wq->ring.desc_size * buf->index; 47 wq->ring.desc_size * buf->index;
48 if (buf->index + 1 == count) { 48 if (buf->index + 1 == count) {
49 buf->next = wq->bufs[0]; 49 buf->next = wq->bufs[0];
50 buf->next->prev = buf;
50 break; 51 break;
51 } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) { 52 } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
52 buf->next = wq->bufs[i + 1]; 53 buf->next = wq->bufs[i + 1];
54 buf->next->prev = buf;
53 } else { 55 } else {
54 buf->next = buf + 1; 56 buf->next = buf + 1;
57 buf->next->prev = buf;
55 buf++; 58 buf++;
56 } 59 }
57 } 60 }
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 816f1ad6072f..296154351823 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -62,6 +62,7 @@ struct vnic_wq_buf {
62 uint8_t cq_entry; /* Gets completion event from hw */ 62 uint8_t cq_entry; /* Gets completion event from hw */
63 uint8_t desc_skip_cnt; /* Num descs to occupy */ 63 uint8_t desc_skip_cnt; /* Num descs to occupy */
64 uint8_t compressed_send; /* Both hdr and payload in one desc */ 64 uint8_t compressed_send; /* Both hdr and payload in one desc */
65 struct vnic_wq_buf *prev;
65}; 66};
66 67
67/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */ 68/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index ef0bb58750e6..c0a7813603c3 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -36,6 +36,9 @@
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/irq.h> 37#include <linux/irq.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/regulator/consumer.h>
40#include <linux/gpio.h>
41#include <linux/of_gpio.h>
39 42
40#include <asm/delay.h> 43#include <asm/delay.h>
41#include <asm/irq.h> 44#include <asm/irq.h>
@@ -1426,11 +1429,48 @@ dm9000_probe(struct platform_device *pdev)
1426 struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev); 1429 struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
1427 struct board_info *db; /* Point a board information structure */ 1430 struct board_info *db; /* Point a board information structure */
1428 struct net_device *ndev; 1431 struct net_device *ndev;
1432 struct device *dev = &pdev->dev;
1429 const unsigned char *mac_src; 1433 const unsigned char *mac_src;
1430 int ret = 0; 1434 int ret = 0;
1431 int iosize; 1435 int iosize;
1432 int i; 1436 int i;
1433 u32 id_val; 1437 u32 id_val;
1438 int reset_gpios;
1439 enum of_gpio_flags flags;
1440 struct regulator *power;
1441
1442 power = devm_regulator_get(dev, "vcc");
1443 if (IS_ERR(power)) {
1444 if (PTR_ERR(power) == -EPROBE_DEFER)
1445 return -EPROBE_DEFER;
1446 dev_dbg(dev, "no regulator provided\n");
1447 } else {
1448 ret = regulator_enable(power);
1449 if (ret != 0) {
1450 dev_err(dev,
1451 "Failed to enable power regulator: %d\n", ret);
1452 return ret;
1453 }
1454 dev_dbg(dev, "regulator enabled\n");
1455 }
1456
1457 reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
1458 &flags);
1459 if (gpio_is_valid(reset_gpios)) {
1460 ret = devm_gpio_request_one(dev, reset_gpios, flags,
1461 "dm9000_reset");
1462 if (ret) {
1463 dev_err(dev, "failed to request reset gpio %d: %d\n",
1464 reset_gpios, ret);
1465 return -ENODEV;
1466 }
1467
1468 /* According to manual PWRST# Low Period Min 1ms */
1469 msleep(2);
1470 gpio_set_value(reset_gpios, 1);
1471 /* Needs 3ms to read eeprom when PWRST is deasserted */
1472 msleep(4);
1473 }
1434 1474
1435 if (!pdata) { 1475 if (!pdata) {
1436 pdata = dm9000_parse_dt(&pdev->dev); 1476 pdata = dm9000_parse_dt(&pdev->dev);
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 6aa887e0e1cb..9beb3d34d4ba 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -904,7 +904,7 @@ static void init_registers(struct net_device *dev)
904 } 904 }
905#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__) 905#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
906 i |= 0xE000; 906 i |= 0xE000;
907#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) 907#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
908 i |= 0x4800; 908 i |= 0x4800;
909#else 909#else
910#warning Processor architecture undefined 910#warning Processor architecture undefined
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 712e7f8e1df7..27de37aa90af 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -59,26 +59,6 @@
59#define OC_SUBSYS_DEVICE_ID3 0xE612 59#define OC_SUBSYS_DEVICE_ID3 0xE612
60#define OC_SUBSYS_DEVICE_ID4 0xE652 60#define OC_SUBSYS_DEVICE_ID4 0xE652
61 61
62static inline char *nic_name(struct pci_dev *pdev)
63{
64 switch (pdev->device) {
65 case OC_DEVICE_ID1:
66 return OC_NAME;
67 case OC_DEVICE_ID2:
68 return OC_NAME_BE;
69 case OC_DEVICE_ID3:
70 case OC_DEVICE_ID4:
71 return OC_NAME_LANCER;
72 case BE_DEVICE_ID2:
73 return BE3_NAME;
74 case OC_DEVICE_ID5:
75 case OC_DEVICE_ID6:
76 return OC_NAME_SH;
77 default:
78 return BE_NAME;
79 }
80}
81
82/* Number of bytes of an RX frame that are copied to skb->data */ 62/* Number of bytes of an RX frame that are copied to skb->data */
83#define BE_HDR_LEN ((u16) 64) 63#define BE_HDR_LEN ((u16) 64)
84/* allocate extra space to allow tunneling decapsulation without head reallocation */ 64/* allocate extra space to allow tunneling decapsulation without head reallocation */
@@ -243,7 +223,6 @@ struct be_tx_stats {
243 u64 tx_bytes; 223 u64 tx_bytes;
244 u64 tx_pkts; 224 u64 tx_pkts;
245 u64 tx_reqs; 225 u64 tx_reqs;
246 u64 tx_wrbs;
247 u64 tx_compl; 226 u64 tx_compl;
248 ulong tx_jiffies; 227 ulong tx_jiffies;
249 u32 tx_stops; 228 u32 tx_stops;
@@ -266,6 +245,9 @@ struct be_tx_obj {
266 /* Remember the skbs that were transmitted */ 245 /* Remember the skbs that were transmitted */
267 struct sk_buff *sent_skb_list[TX_Q_LEN]; 246 struct sk_buff *sent_skb_list[TX_Q_LEN];
268 struct be_tx_stats stats; 247 struct be_tx_stats stats;
248 u16 pend_wrb_cnt; /* Number of WRBs yet to be given to HW */
249 u16 last_req_wrb_cnt; /* wrb cnt of the last req in the Q */
250 u16 last_req_hdr; /* index of the last req's hdr-wrb */
269} ____cacheline_aligned_in_smp; 251} ____cacheline_aligned_in_smp;
270 252
271/* Struct to remember the pages posted for rx frags */ 253/* Struct to remember the pages posted for rx frags */
@@ -379,15 +361,14 @@ enum vf_state {
379 ASSIGNED = 1 361 ASSIGNED = 1
380}; 362};
381 363
382#define BE_FLAGS_LINK_STATUS_INIT 1 364#define BE_FLAGS_LINK_STATUS_INIT BIT(1)
383#define BE_FLAGS_SRIOV_ENABLED (1 << 2) 365#define BE_FLAGS_SRIOV_ENABLED BIT(2)
384#define BE_FLAGS_WORKER_SCHEDULED (1 << 3) 366#define BE_FLAGS_WORKER_SCHEDULED BIT(3)
385#define BE_FLAGS_VLAN_PROMISC (1 << 4) 367#define BE_FLAGS_NAPI_ENABLED BIT(6)
386#define BE_FLAGS_MCAST_PROMISC (1 << 5) 368#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD BIT(7)
387#define BE_FLAGS_NAPI_ENABLED (1 << 9) 369#define BE_FLAGS_VXLAN_OFFLOADS BIT(8)
388#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11) 370#define BE_FLAGS_SETUP_DONE BIT(9)
389#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12) 371#define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10)
390#define BE_FLAGS_SETUP_DONE (1 << 13)
391 372
392#define BE_UC_PMAC_COUNT 30 373#define BE_UC_PMAC_COUNT 30
393#define BE_VF_UC_PMAC_COUNT 2 374#define BE_VF_UC_PMAC_COUNT 2
@@ -397,6 +378,8 @@ enum vf_state {
397#define LANCER_DELETE_FW_DUMP 0x2 378#define LANCER_DELETE_FW_DUMP 0x2
398 379
399struct phy_info { 380struct phy_info {
381/* From SFF-8472 spec */
382#define SFP_VENDOR_NAME_LEN 17
400 u8 transceiver; 383 u8 transceiver;
401 u8 autoneg; 384 u8 autoneg;
402 u8 fc_autoneg; 385 u8 fc_autoneg;
@@ -410,6 +393,8 @@ struct phy_info {
410 u32 advertising; 393 u32 advertising;
411 u32 supported; 394 u32 supported;
412 u8 cable_type; 395 u8 cable_type;
396 u8 vendor_name[SFP_VENDOR_NAME_LEN];
397 u8 vendor_pn[SFP_VENDOR_NAME_LEN];
413}; 398};
414 399
415struct be_resources { 400struct be_resources {
@@ -467,8 +452,6 @@ struct be_adapter {
467 452
468 struct be_drv_stats drv_stats; 453 struct be_drv_stats drv_stats;
469 struct be_aic_obj aic_obj[MAX_EVT_QS]; 454 struct be_aic_obj aic_obj[MAX_EVT_QS];
470 u16 vlans_added;
471 unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
472 u8 vlan_prio_bmap; /* Available Priority BitMap */ 455 u8 vlan_prio_bmap; /* Available Priority BitMap */
473 u16 recommended_prio; /* Recommended Priority */ 456 u16 recommended_prio; /* Recommended Priority */
474 struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */ 457 struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
@@ -484,8 +467,15 @@ struct be_adapter {
484 /* Ethtool knobs and info */ 467 /* Ethtool knobs and info */
485 char fw_ver[FW_VER_LEN]; 468 char fw_ver[FW_VER_LEN];
486 char fw_on_flash[FW_VER_LEN]; 469 char fw_on_flash[FW_VER_LEN];
470
471 /* IFACE filtering fields */
487 int if_handle; /* Used to configure filtering */ 472 int if_handle; /* Used to configure filtering */
473 u32 if_flags; /* Interface filtering flags */
488 u32 *pmac_id; /* MAC addr handle used by BE card */ 474 u32 *pmac_id; /* MAC addr handle used by BE card */
475 u32 uc_macs; /* Count of secondary UC MAC programmed */
476 unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
477 u16 vlans_added;
478
489 u32 beacon_state; /* for set_phys_id */ 479 u32 beacon_state; /* for set_phys_id */
490 480
491 bool eeh_error; 481 bool eeh_error;
@@ -493,7 +483,7 @@ struct be_adapter {
493 bool hw_error; 483 bool hw_error;
494 484
495 u32 port_num; 485 u32 port_num;
496 bool promiscuous; 486 char port_name;
497 u8 mc_type; 487 u8 mc_type;
498 u32 function_mode; 488 u32 function_mode;
499 u32 function_caps; 489 u32 function_caps;
@@ -526,7 +516,6 @@ struct be_adapter {
526 struct phy_info phy; 516 struct phy_info phy;
527 u8 wol_cap; 517 u8 wol_cap;
528 bool wol_en; 518 bool wol_en;
529 u32 uc_macs; /* Count of secondary UC MAC programmed */
530 u16 asic_rev; 519 u16 asic_rev;
531 u16 qnq_vid; 520 u16 qnq_vid;
532 u32 msg_enable; 521 u32 msg_enable;
@@ -732,19 +721,6 @@ static inline bool is_ipv4_pkt(struct sk_buff *skb)
732 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; 721 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
733} 722}
734 723
735static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
736{
737 u32 addr;
738
739 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
740
741 mac[5] = (u8)(addr & 0xFF);
742 mac[4] = (u8)((addr >> 8) & 0xFF);
743 mac[3] = (u8)((addr >> 16) & 0xFF);
744 /* Use the OUI from the current MAC address */
745 memcpy(mac, adapter->netdev->dev_addr, 3);
746}
747
748static inline bool be_multi_rxq(const struct be_adapter *adapter) 724static inline bool be_multi_rxq(const struct be_adapter *adapter)
749{ 725{
750 return adapter->num_rx_qs > 1; 726 return adapter->num_rx_qs > 1;
@@ -767,129 +743,6 @@ static inline void be_clear_all_error(struct be_adapter *adapter)
767 adapter->fw_timeout = false; 743 adapter->fw_timeout = false;
768} 744}
769 745
770static inline bool be_is_wol_excluded(struct be_adapter *adapter)
771{
772 struct pci_dev *pdev = adapter->pdev;
773
774 if (!be_physfn(adapter))
775 return true;
776
777 switch (pdev->subsystem_device) {
778 case OC_SUBSYS_DEVICE_ID1:
779 case OC_SUBSYS_DEVICE_ID2:
780 case OC_SUBSYS_DEVICE_ID3:
781 case OC_SUBSYS_DEVICE_ID4:
782 return true;
783 default:
784 return false;
785 }
786}
787
788static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
789{
790 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
791}
792
793#ifdef CONFIG_NET_RX_BUSY_POLL
794static inline bool be_lock_napi(struct be_eq_obj *eqo)
795{
796 bool status = true;
797
798 spin_lock(&eqo->lock); /* BH is already disabled */
799 if (eqo->state & BE_EQ_LOCKED) {
800 WARN_ON(eqo->state & BE_EQ_NAPI);
801 eqo->state |= BE_EQ_NAPI_YIELD;
802 status = false;
803 } else {
804 eqo->state = BE_EQ_NAPI;
805 }
806 spin_unlock(&eqo->lock);
807 return status;
808}
809
810static inline void be_unlock_napi(struct be_eq_obj *eqo)
811{
812 spin_lock(&eqo->lock); /* BH is already disabled */
813
814 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
815 eqo->state = BE_EQ_IDLE;
816
817 spin_unlock(&eqo->lock);
818}
819
820static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
821{
822 bool status = true;
823
824 spin_lock_bh(&eqo->lock);
825 if (eqo->state & BE_EQ_LOCKED) {
826 eqo->state |= BE_EQ_POLL_YIELD;
827 status = false;
828 } else {
829 eqo->state |= BE_EQ_POLL;
830 }
831 spin_unlock_bh(&eqo->lock);
832 return status;
833}
834
835static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
836{
837 spin_lock_bh(&eqo->lock);
838
839 WARN_ON(eqo->state & (BE_EQ_NAPI));
840 eqo->state = BE_EQ_IDLE;
841
842 spin_unlock_bh(&eqo->lock);
843}
844
845static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
846{
847 spin_lock_init(&eqo->lock);
848 eqo->state = BE_EQ_IDLE;
849}
850
851static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
852{
853 local_bh_disable();
854
855 /* It's enough to just acquire napi lock on the eqo to stop
856 * be_busy_poll() from processing any queueus.
857 */
858 while (!be_lock_napi(eqo))
859 mdelay(1);
860
861 local_bh_enable();
862}
863
864#else /* CONFIG_NET_RX_BUSY_POLL */
865
866static inline bool be_lock_napi(struct be_eq_obj *eqo)
867{
868 return true;
869}
870
871static inline void be_unlock_napi(struct be_eq_obj *eqo)
872{
873}
874
875static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
876{
877 return false;
878}
879
880static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
881{
882}
883
884static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
885{
886}
887
888static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
889{
890}
891#endif /* CONFIG_NET_RX_BUSY_POLL */
892
893void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, 746void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
894 u16 num_popped); 747 u16 num_popped);
895void be_link_status_update(struct be_adapter *adapter, u8 link_status); 748void be_link_status_update(struct be_adapter *adapter, u8 link_status);
@@ -898,16 +751,6 @@ int be_load_fw(struct be_adapter *adapter, u8 *func);
898bool be_is_wol_supported(struct be_adapter *adapter); 751bool be_is_wol_supported(struct be_adapter *adapter);
899bool be_pause_supported(struct be_adapter *adapter); 752bool be_pause_supported(struct be_adapter *adapter);
900u32 be_get_fw_log_level(struct be_adapter *adapter); 753u32 be_get_fw_log_level(struct be_adapter *adapter);
901
902static inline int fw_major_num(const char *fw_ver)
903{
904 int fw_major = 0;
905
906 sscanf(fw_ver, "%d.", &fw_major);
907
908 return fw_major;
909}
910
911int be_update_queues(struct be_adapter *adapter); 754int be_update_queues(struct be_adapter *adapter);
912int be_poll(struct napi_struct *napi, int budget); 755int be_poll(struct napi_struct *napi, int budget);
913 756
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index fead5c65a4f0..36916cfa70f9 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,6 +19,22 @@
19#include "be.h" 19#include "be.h"
20#include "be_cmds.h" 20#include "be_cmds.h"
21 21
22static char *be_port_misconfig_evt_desc[] = {
23 "A valid SFP module detected",
24 "Optics faulted/ incorrectly installed/ not installed.",
25 "Optics of two types installed.",
26 "Incompatible optics.",
27 "Unknown port SFP status"
28};
29
30static char *be_port_misconfig_remedy_desc[] = {
31 "",
32 "Reseat optics. If issue not resolved, replace",
33 "Remove one optic or install matching pair of optics",
34 "Replace with compatible optics for card to function",
35 ""
36};
37
22static struct be_cmd_priv_map cmd_priv_map[] = { 38static struct be_cmd_priv_map cmd_priv_map[] = {
23 { 39 {
24 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 40 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
@@ -249,6 +265,29 @@ static void be_async_link_state_process(struct be_adapter *adapter,
249 evt->port_link_status & LINK_STATUS_MASK); 265 evt->port_link_status & LINK_STATUS_MASK);
250} 266}
251 267
268static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
269 struct be_mcc_compl *compl)
270{
271 struct be_async_event_misconfig_port *evt =
272 (struct be_async_event_misconfig_port *)compl;
273 u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
274 struct device *dev = &adapter->pdev->dev;
275 u8 port_misconfig_evt;
276
277 port_misconfig_evt =
278 ((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);
279
280 /* Log an error message that would allow a user to determine
281 * whether the SFPs have an issue
282 */
283 dev_info(dev, "Port %c: %s %s", adapter->port_name,
284 be_port_misconfig_evt_desc[port_misconfig_evt],
285 be_port_misconfig_remedy_desc[port_misconfig_evt]);
286
287 if (port_misconfig_evt == INCOMPATIBLE_SFP)
288 adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
289}
290
252/* Grp5 CoS Priority evt */ 291/* Grp5 CoS Priority evt */
253static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, 292static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
254 struct be_mcc_compl *compl) 293 struct be_mcc_compl *compl)
@@ -334,6 +373,16 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
334 } 373 }
335} 374}
336 375
376static void be_async_sliport_evt_process(struct be_adapter *adapter,
377 struct be_mcc_compl *cmp)
378{
379 u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
380 ASYNC_EVENT_TYPE_MASK;
381
382 if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
383 be_async_port_misconfig_event_process(adapter, cmp);
384}
385
337static inline bool is_link_state_evt(u32 flags) 386static inline bool is_link_state_evt(u32 flags)
338{ 387{
339 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == 388 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
@@ -352,6 +401,12 @@ static inline bool is_dbg_evt(u32 flags)
352 ASYNC_EVENT_CODE_QNQ; 401 ASYNC_EVENT_CODE_QNQ;
353} 402}
354 403
404static inline bool is_sliport_evt(u32 flags)
405{
406 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
407 ASYNC_EVENT_CODE_SLIPORT;
408}
409
355static void be_mcc_event_process(struct be_adapter *adapter, 410static void be_mcc_event_process(struct be_adapter *adapter,
356 struct be_mcc_compl *compl) 411 struct be_mcc_compl *compl)
357{ 412{
@@ -361,6 +416,8 @@ static void be_mcc_event_process(struct be_adapter *adapter,
361 be_async_grp5_evt_process(adapter, compl); 416 be_async_grp5_evt_process(adapter, compl);
362 else if (is_dbg_evt(compl->flags)) 417 else if (is_dbg_evt(compl->flags))
363 be_async_dbg_evt_process(adapter, compl); 418 be_async_dbg_evt_process(adapter, compl);
419 else if (is_sliport_evt(compl->flags))
420 be_async_sliport_evt_process(adapter, compl);
364} 421}
365 422
366static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) 423static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
@@ -573,7 +630,7 @@ static int lancer_wait_ready(struct be_adapter *adapter)
573{ 630{
574#define SLIPORT_READY_TIMEOUT 30 631#define SLIPORT_READY_TIMEOUT 30
575 u32 sliport_status; 632 u32 sliport_status;
576 int status = 0, i; 633 int i;
577 634
578 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) { 635 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
579 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 636 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
@@ -584,9 +641,9 @@ static int lancer_wait_ready(struct be_adapter *adapter)
584 } 641 }
585 642
586 if (i == SLIPORT_READY_TIMEOUT) 643 if (i == SLIPORT_READY_TIMEOUT)
587 status = -1; 644 return sliport_status ? : -1;
588 645
589 return status; 646 return 0;
590} 647}
591 648
592static bool lancer_provisioning_error(struct be_adapter *adapter) 649static bool lancer_provisioning_error(struct be_adapter *adapter)
@@ -624,7 +681,7 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
624 iowrite32(SLI_PORT_CONTROL_IP_MASK, 681 iowrite32(SLI_PORT_CONTROL_IP_MASK,
625 adapter->db + SLIPORT_CONTROL_OFFSET); 682 adapter->db + SLIPORT_CONTROL_OFFSET);
626 683
627 /* check adapter has corrected the error */ 684 /* check if adapter has corrected the error */
628 status = lancer_wait_ready(adapter); 685 status = lancer_wait_ready(adapter);
629 sliport_status = ioread32(adapter->db + 686 sliport_status = ioread32(adapter->db +
630 SLIPORT_STATUS_OFFSET); 687 SLIPORT_STATUS_OFFSET);
@@ -655,7 +712,11 @@ int be_fw_wait_ready(struct be_adapter *adapter)
655 712
656 if (lancer_chip(adapter)) { 713 if (lancer_chip(adapter)) {
657 status = lancer_wait_ready(adapter); 714 status = lancer_wait_ready(adapter);
658 return status; 715 if (status) {
716 stage = status;
717 goto err;
718 }
719 return 0;
659 } 720 }
660 721
661 do { 722 do {
@@ -671,7 +732,8 @@ int be_fw_wait_ready(struct be_adapter *adapter)
671 timeout += 2; 732 timeout += 2;
672 } while (timeout < 60); 733 } while (timeout < 60);
673 734
674 dev_err(dev, "POST timeout; stage=0x%x\n", stage); 735err:
736 dev_err(dev, "POST timeout; stage=%#x\n", stage);
675 return -1; 737 return -1;
676} 738}
677 739
@@ -1166,9 +1228,15 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1166 ctxt, 1); 1228 ctxt, 1);
1167 } 1229 }
1168 1230
1169 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ 1231 /* Subscribe to Link State, Sliport Event and Group 5 Events
1170 req->async_event_bitmap[0] = cpu_to_le32(0x00000022); 1232 * (bits 1, 5 and 17 set)
1171 req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ); 1233 */
1234 req->async_event_bitmap[0] =
1235 cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
1236 BIT(ASYNC_EVENT_CODE_GRP_5) |
1237 BIT(ASYNC_EVENT_CODE_QNQ) |
1238 BIT(ASYNC_EVENT_CODE_SLIPORT));
1239
1172 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1240 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1173 1241
1174 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1242 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -1881,7 +1949,7 @@ err:
1881 return status; 1949 return status;
1882} 1950}
1883 1951
1884int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) 1952static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1885{ 1953{
1886 struct be_mcc_wrb *wrb; 1954 struct be_mcc_wrb *wrb;
1887 struct be_dma_mem *mem = &adapter->rx_filter; 1955 struct be_dma_mem *mem = &adapter->rx_filter;
@@ -1901,31 +1969,13 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1901 wrb, mem); 1969 wrb, mem);
1902 1970
1903 req->if_id = cpu_to_le32(adapter->if_handle); 1971 req->if_id = cpu_to_le32(adapter->if_handle);
1904 if (flags & IFF_PROMISC) { 1972 req->if_flags_mask = cpu_to_le32(flags);
1905 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | 1973 req->if_flags = (value == ON) ? req->if_flags_mask : 0;
1906 BE_IF_FLAGS_VLAN_PROMISCUOUS | 1974
1907 BE_IF_FLAGS_MCAST_PROMISCUOUS); 1975 if (flags & BE_IF_FLAGS_MULTICAST) {
1908 if (value == ON)
1909 req->if_flags =
1910 cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1911 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1912 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1913 } else if (flags & IFF_ALLMULTI) {
1914 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1915 req->if_flags = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1916 } else if (flags & BE_FLAGS_VLAN_PROMISC) {
1917 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1918
1919 if (value == ON)
1920 req->if_flags =
1921 cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1922 } else {
1923 struct netdev_hw_addr *ha; 1976 struct netdev_hw_addr *ha;
1924 int i = 0; 1977 int i = 0;
1925 1978
1926 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1927 req->if_flags = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1928
1929 /* Reset mcast promisc mode if already set by setting mask 1979 /* Reset mcast promisc mode if already set by setting mask
1930 * and not setting flags field 1980 * and not setting flags field
1931 */ 1981 */
@@ -1937,24 +1987,26 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1937 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); 1987 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1938 } 1988 }
1939 1989
1940 if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
1941 req->if_flags_mask) {
1942 dev_warn(&adapter->pdev->dev,
1943 "Cannot set rx filter flags 0x%x\n",
1944 req->if_flags_mask);
1945 dev_warn(&adapter->pdev->dev,
1946 "Interface is capable of 0x%x flags only\n",
1947 be_if_cap_flags(adapter));
1948 }
1949 req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter));
1950
1951 status = be_mcc_notify_wait(adapter); 1990 status = be_mcc_notify_wait(adapter);
1952
1953err: 1991err:
1954 spin_unlock_bh(&adapter->mcc_lock); 1992 spin_unlock_bh(&adapter->mcc_lock);
1955 return status; 1993 return status;
1956} 1994}
1957 1995
1996int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1997{
1998 struct device *dev = &adapter->pdev->dev;
1999
2000 if ((flags & be_if_cap_flags(adapter)) != flags) {
2001 dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
2002 dev_warn(dev, "Interface is capable of 0x%x flags only\n",
2003 be_if_cap_flags(adapter));
2004 }
2005 flags &= be_if_cap_flags(adapter);
2006
2007 return __be_cmd_rx_filter(adapter, flags, value);
2008}
2009
1958/* Uses synchrounous mcc */ 2010/* Uses synchrounous mcc */
1959int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) 2011int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1960{ 2012{
@@ -2355,6 +2407,24 @@ int be_cmd_query_cable_type(struct be_adapter *adapter)
2355 return status; 2407 return status;
2356} 2408}
2357 2409
2410int be_cmd_query_sfp_info(struct be_adapter *adapter)
2411{
2412 u8 page_data[PAGE_DATA_LEN];
2413 int status;
2414
2415 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2416 page_data);
2417 if (!status) {
2418 strlcpy(adapter->phy.vendor_name, page_data +
2419 SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
2420 strlcpy(adapter->phy.vendor_pn,
2421 page_data + SFP_VENDOR_PN_OFFSET,
2422 SFP_VENDOR_NAME_LEN - 1);
2423 }
2424
2425 return status;
2426}
2427
2358int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name) 2428int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
2359{ 2429{
2360 struct lancer_cmd_req_delete_object *req; 2430 struct lancer_cmd_req_delete_object *req;
@@ -2431,7 +2501,8 @@ err_unlock:
2431} 2501}
2432 2502
2433int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 2503int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2434 u32 flash_type, u32 flash_opcode, u32 buf_size) 2504 u32 flash_type, u32 flash_opcode, u32 img_offset,
2505 u32 buf_size)
2435{ 2506{
2436 struct be_mcc_wrb *wrb; 2507 struct be_mcc_wrb *wrb;
2437 struct be_cmd_write_flashrom *req; 2508 struct be_cmd_write_flashrom *req;
@@ -2452,6 +2523,9 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2452 cmd); 2523 cmd);
2453 2524
2454 req->params.op_type = cpu_to_le32(flash_type); 2525 req->params.op_type = cpu_to_le32(flash_type);
2526 if (flash_type == OPTYPE_OFFSET_SPECIFIED)
2527 req->params.offset = cpu_to_le32(img_offset);
2528
2455 req->params.op_code = cpu_to_le32(flash_opcode); 2529 req->params.op_code = cpu_to_le32(flash_opcode);
2456 req->params.data_buf_size = cpu_to_le32(buf_size); 2530 req->params.data_buf_size = cpu_to_le32(buf_size);
2457 2531
@@ -2472,10 +2546,10 @@ err_unlock:
2472} 2546}
2473 2547
2474int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, 2548int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2475 u16 optype, int offset) 2549 u16 img_optype, u32 img_offset, u32 crc_offset)
2476{ 2550{
2477 struct be_mcc_wrb *wrb;
2478 struct be_cmd_read_flash_crc *req; 2551 struct be_cmd_read_flash_crc *req;
2552 struct be_mcc_wrb *wrb;
2479 int status; 2553 int status;
2480 2554
2481 spin_lock_bh(&adapter->mcc_lock); 2555 spin_lock_bh(&adapter->mcc_lock);
@@ -2491,9 +2565,13 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2491 OPCODE_COMMON_READ_FLASHROM, sizeof(*req), 2565 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2492 wrb, NULL); 2566 wrb, NULL);
2493 2567
2494 req->params.op_type = cpu_to_le32(optype); 2568 req->params.op_type = cpu_to_le32(img_optype);
2569 if (img_optype == OPTYPE_OFFSET_SPECIFIED)
2570 req->params.offset = cpu_to_le32(img_offset + crc_offset);
2571 else
2572 req->params.offset = cpu_to_le32(crc_offset);
2573
2495 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 2574 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2496 req->params.offset = cpu_to_le32(offset);
2497 req->params.data_buf_size = cpu_to_le32(0x4); 2575 req->params.data_buf_size = cpu_to_le32(0x4);
2498 2576
2499 status = be_mcc_notify_wait(adapter); 2577 status = be_mcc_notify_wait(adapter);
@@ -2742,7 +2820,7 @@ err:
2742 return status; 2820 return status;
2743} 2821}
2744 2822
2745int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) 2823static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2746{ 2824{
2747 struct be_mcc_wrb *wrb; 2825 struct be_mcc_wrb *wrb;
2748 struct be_cmd_req_set_qos *req; 2826 struct be_cmd_req_set_qos *req;
@@ -3236,6 +3314,24 @@ err:
3236 return status; 3314 return status;
3237} 3315}
3238 3316
3317static bool be_is_wol_excluded(struct be_adapter *adapter)
3318{
3319 struct pci_dev *pdev = adapter->pdev;
3320
3321 if (!be_physfn(adapter))
3322 return true;
3323
3324 switch (pdev->subsystem_device) {
3325 case OC_SUBSYS_DEVICE_ID1:
3326 case OC_SUBSYS_DEVICE_ID2:
3327 case OC_SUBSYS_DEVICE_ID3:
3328 case OC_SUBSYS_DEVICE_ID4:
3329 return true;
3330 default:
3331 return false;
3332 }
3333}
3334
3239int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) 3335int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3240{ 3336{
3241 struct be_mcc_wrb *wrb; 3337 struct be_mcc_wrb *wrb;
@@ -3422,42 +3518,34 @@ err:
3422 return status; 3518 return status;
3423} 3519}
3424 3520
3425int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name) 3521int be_cmd_query_port_name(struct be_adapter *adapter)
3426{ 3522{
3427 struct be_mcc_wrb *wrb;
3428 struct be_cmd_req_get_port_name *req; 3523 struct be_cmd_req_get_port_name *req;
3524 struct be_mcc_wrb *wrb;
3429 int status; 3525 int status;
3430 3526
3431 if (!lancer_chip(adapter)) { 3527 if (mutex_lock_interruptible(&adapter->mbox_lock))
3432 *port_name = adapter->hba_port_num + '0'; 3528 return -1;
3433 return 0;
3434 }
3435
3436 spin_lock_bh(&adapter->mcc_lock);
3437
3438 wrb = wrb_from_mccq(adapter);
3439 if (!wrb) {
3440 status = -EBUSY;
3441 goto err;
3442 }
3443 3529
3530 wrb = wrb_from_mbox(adapter);
3444 req = embedded_payload(wrb); 3531 req = embedded_payload(wrb);
3445 3532
3446 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3533 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3447 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb, 3534 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3448 NULL); 3535 NULL);
3449 req->hdr.version = 1; 3536 if (!BEx_chip(adapter))
3537 req->hdr.version = 1;
3450 3538
3451 status = be_mcc_notify_wait(adapter); 3539 status = be_mbox_notify_wait(adapter);
3452 if (!status) { 3540 if (!status) {
3453 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb); 3541 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
3454 3542
3455 *port_name = resp->port_name[adapter->hba_port_num]; 3543 adapter->port_name = resp->port_name[adapter->hba_port_num];
3456 } else { 3544 } else {
3457 *port_name = adapter->hba_port_num + '0'; 3545 adapter->port_name = adapter->hba_port_num + '0';
3458 } 3546 }
3459err: 3547
3460 spin_unlock_bh(&adapter->mcc_lock); 3548 mutex_unlock(&adapter->mbox_lock);
3461 return status; 3549 return status;
3462} 3550}
3463 3551
@@ -3751,6 +3839,7 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
3751 be_reset_nic_desc(&nic_desc); 3839 be_reset_nic_desc(&nic_desc);
3752 nic_desc.pf_num = adapter->pf_number; 3840 nic_desc.pf_num = adapter->pf_number;
3753 nic_desc.vf_num = domain; 3841 nic_desc.vf_num = domain;
3842 nic_desc.bw_min = 0;
3754 if (lancer_chip(adapter)) { 3843 if (lancer_chip(adapter)) {
3755 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; 3844 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3756 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; 3845 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
@@ -4092,7 +4181,7 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
4092 int status; 4181 int status;
4093 4182
4094 if (BEx_chip(adapter) || lancer_chip(adapter)) 4183 if (BEx_chip(adapter) || lancer_chip(adapter))
4095 return 0; 4184 return -EOPNOTSUPP;
4096 4185
4097 spin_lock_bh(&adapter->mcc_lock); 4186 spin_lock_bh(&adapter->mcc_lock);
4098 4187
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index eb5085d6794f..db761e8e42a3 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -44,10 +44,10 @@ struct be_mcc_wrb {
44 } payload; 44 } payload;
45}; 45};
46 46
47#define CQE_FLAGS_VALID_MASK (1 << 31) 47#define CQE_FLAGS_VALID_MASK BIT(31)
48#define CQE_FLAGS_ASYNC_MASK (1 << 30) 48#define CQE_FLAGS_ASYNC_MASK BIT(30)
49#define CQE_FLAGS_COMPLETED_MASK (1 << 28) 49#define CQE_FLAGS_COMPLETED_MASK BIT(28)
50#define CQE_FLAGS_CONSUMED_MASK (1 << 27) 50#define CQE_FLAGS_CONSUMED_MASK BIT(27)
51 51
52/* Completion Status */ 52/* Completion Status */
53enum mcc_base_status { 53enum mcc_base_status {
@@ -102,6 +102,8 @@ struct be_mcc_compl {
102#define ASYNC_EVENT_PVID_STATE 0x3 102#define ASYNC_EVENT_PVID_STATE 0x3
103#define ASYNC_EVENT_CODE_QNQ 0x6 103#define ASYNC_EVENT_CODE_QNQ 0x6
104#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1 104#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1
105#define ASYNC_EVENT_CODE_SLIPORT 0x11
106#define ASYNC_EVENT_PORT_MISCONFIG 0x9
105 107
106enum { 108enum {
107 LINK_DOWN = 0x0, 109 LINK_DOWN = 0x0,
@@ -169,6 +171,15 @@ struct be_async_event_qnq {
169 u32 flags; 171 u32 flags;
170} __packed; 172} __packed;
171 173
174#define INCOMPATIBLE_SFP 0x3
175/* async event indicating misconfigured port */
176struct be_async_event_misconfig_port {
177 u32 event_data_word1;
178 u32 event_data_word2;
179 u32 rsvd0;
180 u32 flags;
181} __packed;
182
172struct be_mcc_mailbox { 183struct be_mcc_mailbox {
173 struct be_mcc_wrb wrb; 184 struct be_mcc_wrb wrb;
174 struct be_mcc_compl compl; 185 struct be_mcc_compl compl;
@@ -586,6 +597,10 @@ enum be_if_flags {
586 BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\ 597 BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
587 BE_IF_FLAGS_UNTAGGED) 598 BE_IF_FLAGS_UNTAGGED)
588 599
600#define BE_IF_FLAGS_ALL_PROMISCUOUS (BE_IF_FLAGS_PROMISCUOUS | \
601 BE_IF_FLAGS_VLAN_PROMISCUOUS |\
602 BE_IF_FLAGS_MCAST_PROMISCUOUS)
603
589/* An RX interface is an object with one or more MAC addresses and 604/* An RX interface is an object with one or more MAC addresses and
590 * filtering capabilities. */ 605 * filtering capabilities. */
591struct be_cmd_req_if_create { 606struct be_cmd_req_if_create {
@@ -1024,6 +1039,8 @@ enum {
1024#define SFP_PLUS_SFF_8472_COMP 0x5E 1039#define SFP_PLUS_SFF_8472_COMP 0x5E
1025#define SFP_PLUS_CABLE_TYPE_OFFSET 0x8 1040#define SFP_PLUS_CABLE_TYPE_OFFSET 0x8
1026#define SFP_PLUS_COPPER_CABLE 0x4 1041#define SFP_PLUS_COPPER_CABLE 0x4
1042#define SFP_VENDOR_NAME_OFFSET 0x14
1043#define SFP_VENDOR_PN_OFFSET 0x28
1027 1044
1028#define PAGE_DATA_LEN 256 1045#define PAGE_DATA_LEN 256
1029struct be_cmd_resp_port_type { 1046struct be_cmd_resp_port_type {
@@ -1091,6 +1108,10 @@ struct be_cmd_req_query_fw_cfg {
1091 u32 rsvd[31]; 1108 u32 rsvd[31];
1092}; 1109};
1093 1110
1111/* ASIC revisions */
1112#define ASIC_REV_B0 0x10
1113#define ASIC_REV_P2 0x11
1114
1094struct be_cmd_resp_query_fw_cfg { 1115struct be_cmd_resp_query_fw_cfg {
1095 struct be_cmd_resp_hdr hdr; 1116 struct be_cmd_resp_hdr hdr;
1096 u32 be_config_number; 1117 u32 be_config_number;
@@ -1161,7 +1182,173 @@ struct be_cmd_resp_get_beacon_state {
1161 u8 rsvd0[3]; 1182 u8 rsvd0[3];
1162} __packed; 1183} __packed;
1163 1184
1185/* Flashrom related descriptors */
1186#define MAX_FLASH_COMP 32
1187
1188#define OPTYPE_ISCSI_ACTIVE 0
1189#define OPTYPE_REDBOOT 1
1190#define OPTYPE_BIOS 2
1191#define OPTYPE_PXE_BIOS 3
1192#define OPTYPE_OFFSET_SPECIFIED 7
1193#define OPTYPE_FCOE_BIOS 8
1194#define OPTYPE_ISCSI_BACKUP 9
1195#define OPTYPE_FCOE_FW_ACTIVE 10
1196#define OPTYPE_FCOE_FW_BACKUP 11
1197#define OPTYPE_NCSI_FW 13
1198#define OPTYPE_REDBOOT_DIR 18
1199#define OPTYPE_REDBOOT_CONFIG 19
1200#define OPTYPE_SH_PHY_FW 21
1201#define OPTYPE_FLASHISM_JUMPVECTOR 22
1202#define OPTYPE_UFI_DIR 23
1203#define OPTYPE_PHY_FW 99
1204
1205#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 262144 /* Max OPTION ROM image sz */
1206#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 262144 /* Max Redboot image sz */
1207#define FLASH_IMAGE_MAX_SIZE_g2 1310720 /* Max firmware image size */
1208
1209#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 262144
1210#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144
1211#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 524288 /* Max OPTION ROM image sz */
1212#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 1048576 /* Max Redboot image sz */
1213#define FLASH_IMAGE_MAX_SIZE_g3 2097152 /* Max firmware image size */
1214
1215/* Offsets for components on Flash. */
1216#define FLASH_REDBOOT_START_g2 0
1217#define FLASH_FCoE_BIOS_START_g2 524288
1218#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 1048576
1219#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 2359296
1220#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 3670016
1221#define FLASH_FCoE_BACKUP_IMAGE_START_g2 4980736
1222#define FLASH_iSCSI_BIOS_START_g2 7340032
1223#define FLASH_PXE_BIOS_START_g2 7864320
1224
1225#define FLASH_REDBOOT_START_g3 262144
1226#define FLASH_PHY_FW_START_g3 1310720
1227#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 2097152
1228#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 4194304
1229#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 6291456
1230#define FLASH_FCoE_BACKUP_IMAGE_START_g3 8388608
1231#define FLASH_iSCSI_BIOS_START_g3 12582912
1232#define FLASH_PXE_BIOS_START_g3 13107200
1233#define FLASH_FCoE_BIOS_START_g3 13631488
1234#define FLASH_NCSI_START_g3 15990784
1235
1236#define IMAGE_NCSI 16
1237#define IMAGE_OPTION_ROM_PXE 32
1238#define IMAGE_OPTION_ROM_FCoE 33
1239#define IMAGE_OPTION_ROM_ISCSI 34
1240#define IMAGE_FLASHISM_JUMPVECTOR 48
1241#define IMAGE_FIRMWARE_iSCSI 160
1242#define IMAGE_FIRMWARE_FCoE 162
1243#define IMAGE_FIRMWARE_BACKUP_iSCSI 176
1244#define IMAGE_FIRMWARE_BACKUP_FCoE 178
1245#define IMAGE_FIRMWARE_PHY 192
1246#define IMAGE_REDBOOT_DIR 208
1247#define IMAGE_REDBOOT_CONFIG 209
1248#define IMAGE_UFI_DIR 210
1249#define IMAGE_BOOT_CODE 224
1250
1251struct controller_id {
1252 u32 vendor;
1253 u32 device;
1254 u32 subvendor;
1255 u32 subdevice;
1256};
1257
1258struct flash_comp {
1259 unsigned long offset;
1260 int optype;
1261 int size;
1262 int img_type;
1263};
1264
1265struct image_hdr {
1266 u32 imageid;
1267 u32 imageoffset;
1268 u32 imagelength;
1269 u32 image_checksum;
1270 u8 image_version[32];
1271};
1272
1273struct flash_file_hdr_g2 {
1274 u8 sign[32];
1275 u32 cksum;
1276 u32 antidote;
1277 struct controller_id cont_id;
1278 u32 file_len;
1279 u32 chunk_num;
1280 u32 total_chunks;
1281 u32 num_imgs;
1282 u8 build[24];
1283};
1284
1285/* First letter of the build version of the image */
1286#define BLD_STR_UFI_TYPE_BE2 '2'
1287#define BLD_STR_UFI_TYPE_BE3 '3'
1288#define BLD_STR_UFI_TYPE_SH '4'
1289
1290struct flash_file_hdr_g3 {
1291 u8 sign[52];
1292 u8 ufi_version[4];
1293 u32 file_len;
1294 u32 cksum;
1295 u32 antidote;
1296 u32 num_imgs;
1297 u8 build[24];
1298 u8 asic_type_rev;
1299 u8 rsvd[31];
1300};
1301
1302struct flash_section_hdr {
1303 u32 format_rev;
1304 u32 cksum;
1305 u32 antidote;
1306 u32 num_images;
1307 u8 id_string[128];
1308 u32 rsvd[4];
1309} __packed;
1310
1311struct flash_section_hdr_g2 {
1312 u32 format_rev;
1313 u32 cksum;
1314 u32 antidote;
1315 u32 build_num;
1316 u8 id_string[128];
1317 u32 rsvd[8];
1318} __packed;
1319
1320struct flash_section_entry {
1321 u32 type;
1322 u32 offset;
1323 u32 pad_size;
1324 u32 image_size;
1325 u32 cksum;
1326 u32 entry_point;
1327 u16 optype;
1328 u16 rsvd0;
1329 u32 rsvd1;
1330 u8 ver_data[32];
1331} __packed;
1332
1333struct flash_section_info {
1334 u8 cookie[32];
1335 struct flash_section_hdr fsec_hdr;
1336 struct flash_section_entry fsec_entry[32];
1337} __packed;
1338
1339struct flash_section_info_g2 {
1340 u8 cookie[32];
1341 struct flash_section_hdr_g2 fsec_hdr;
1342 struct flash_section_entry fsec_entry[32];
1343} __packed;
1344
1164/****************** Firmware Flash ******************/ 1345/****************** Firmware Flash ******************/
1346#define FLASHROM_OPER_FLASH 1
1347#define FLASHROM_OPER_SAVE 2
1348#define FLASHROM_OPER_REPORT 4
1349#define FLASHROM_OPER_PHY_FLASH 9
1350#define FLASHROM_OPER_PHY_SAVE 10
1351
1165struct flashrom_params { 1352struct flashrom_params {
1166 u32 op_code; 1353 u32 op_code;
1167 u32 op_type; 1354 u32 op_type;
@@ -1366,6 +1553,7 @@ enum {
1366 PHY_TYPE_QSFP, 1553 PHY_TYPE_QSFP,
1367 PHY_TYPE_KR4_40GB, 1554 PHY_TYPE_KR4_40GB,
1368 PHY_TYPE_KR2_20GB, 1555 PHY_TYPE_KR2_20GB,
1556 PHY_TYPE_TN_8022,
1369 PHY_TYPE_DISABLED = 255 1557 PHY_TYPE_DISABLED = 255
1370}; 1558};
1371 1559
@@ -1429,6 +1617,20 @@ struct be_cmd_req_set_qos {
1429}; 1617};
1430 1618
1431/*********************** Controller Attributes ***********************/ 1619/*********************** Controller Attributes ***********************/
1620struct mgmt_hba_attribs {
1621 u32 rsvd0[24];
1622 u8 controller_model_number[32];
1623 u32 rsvd1[79];
1624 u8 rsvd2[3];
1625 u8 phy_port;
1626 u32 rsvd3[13];
1627} __packed;
1628
1629struct mgmt_controller_attrib {
1630 struct mgmt_hba_attribs hba_attribs;
1631 u32 rsvd0[10];
1632} __packed;
1633
1432struct be_cmd_req_cntl_attribs { 1634struct be_cmd_req_cntl_attribs {
1433 struct be_cmd_req_hdr hdr; 1635 struct be_cmd_req_hdr hdr;
1434}; 1636};
@@ -2070,8 +2272,10 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
2070int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, 2272int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2071 u8 page_num, u8 *data); 2273 u8 page_num, u8 *data);
2072int be_cmd_query_cable_type(struct be_adapter *adapter); 2274int be_cmd_query_cable_type(struct be_adapter *adapter);
2275int be_cmd_query_sfp_info(struct be_adapter *adapter);
2073int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 2276int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2074 u32 flash_oper, u32 flash_opcode, u32 buf_size); 2277 u32 flash_oper, u32 flash_opcode, u32 img_offset,
2278 u32 buf_size);
2075int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2279int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2076 u32 data_size, u32 data_offset, 2280 u32 data_size, u32 data_offset,
2077 const char *obj_name, u32 *data_written, 2281 const char *obj_name, u32 *data_written,
@@ -2081,7 +2285,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2081 u32 *data_read, u32 *eof, u8 *addn_status); 2285 u32 *data_read, u32 *eof, u8 *addn_status);
2082int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name); 2286int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name);
2083int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, 2287int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2084 u16 optype, int offset); 2288 u16 img_optype, u32 img_offset, u32 crc_offset);
2085int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 2289int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2086 struct be_dma_mem *nonemb_cmd); 2290 struct be_dma_mem *nonemb_cmd);
2087int be_cmd_fw_init(struct be_adapter *adapter); 2291int be_cmd_fw_init(struct be_adapter *adapter);
@@ -2136,7 +2340,7 @@ int lancer_initiate_dump(struct be_adapter *adapter);
2136int lancer_delete_dump(struct be_adapter *adapter); 2340int lancer_delete_dump(struct be_adapter *adapter);
2137bool dump_present(struct be_adapter *adapter); 2341bool dump_present(struct be_adapter *adapter);
2138int lancer_test_and_set_rdy_state(struct be_adapter *adapter); 2342int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
2139int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name); 2343int be_cmd_query_port_name(struct be_adapter *adapter);
2140int be_cmd_get_func_config(struct be_adapter *adapter, 2344int be_cmd_get_func_config(struct be_adapter *adapter,
2141 struct be_resources *res); 2345 struct be_resources *res);
2142int be_cmd_get_profile_config(struct be_adapter *adapter, 2346int be_cmd_get_profile_config(struct be_adapter *adapter,
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 73a500ccbf69..4d2de4700769 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -193,8 +193,6 @@ static const struct be_ethtool_stat et_tx_stats[] = {
193 {DRVSTAT_TX_INFO(tx_pkts)}, 193 {DRVSTAT_TX_INFO(tx_pkts)},
194 /* Number of skbs queued for trasmission by the driver */ 194 /* Number of skbs queued for trasmission by the driver */
195 {DRVSTAT_TX_INFO(tx_reqs)}, 195 {DRVSTAT_TX_INFO(tx_reqs)},
196 /* Number of TX work request blocks DMAed to HW */
197 {DRVSTAT_TX_INFO(tx_wrbs)},
198 /* Number of times the TX queue was stopped due to lack 196 /* Number of times the TX queue was stopped due to lack
199 * of spaces in the TXQ. 197 * of spaces in the TXQ.
200 */ 198 */
@@ -707,15 +705,17 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
707 705
708 if (ecmd->autoneg != adapter->phy.fc_autoneg) 706 if (ecmd->autoneg != adapter->phy.fc_autoneg)
709 return -EINVAL; 707 return -EINVAL;
710 adapter->tx_fc = ecmd->tx_pause;
711 adapter->rx_fc = ecmd->rx_pause;
712 708
713 status = be_cmd_set_flow_control(adapter, 709 status = be_cmd_set_flow_control(adapter, ecmd->tx_pause,
714 adapter->tx_fc, adapter->rx_fc); 710 ecmd->rx_pause);
715 if (status) 711 if (status) {
716 dev_warn(&adapter->pdev->dev, "Pause param set failed\n"); 712 dev_warn(&adapter->pdev->dev, "Pause param set failed\n");
713 return be_cmd_status(status);
714 }
717 715
718 return be_cmd_status(status); 716 adapter->tx_fc = ecmd->tx_pause;
717 adapter->rx_fc = ecmd->rx_pause;
718 return 0;
719} 719}
720 720
721static int be_set_phys_id(struct net_device *netdev, 721static int be_set_phys_id(struct net_device *netdev,
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 295ee0835ba0..48840889db62 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -75,7 +75,7 @@
75 * atomically without having to arbitrate for the PCI Interrupt Disable bit 75 * atomically without having to arbitrate for the PCI Interrupt Disable bit
76 * with the OS. 76 * with the OS.
77 */ 77 */
78#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */ 78#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK BIT(29) /* bit 29 */
79 79
80/********* PCI Function Capability *********/ 80/********* PCI Function Capability *********/
81#define BE_FUNCTION_CAPS_RSS 0x2 81#define BE_FUNCTION_CAPS_RSS 0x2
@@ -171,94 +171,6 @@
171#define RETRIEVE_FAT 0 171#define RETRIEVE_FAT 0
172#define QUERY_FAT 1 172#define QUERY_FAT 1
173 173
174/* Flashrom related descriptors */
175#define MAX_FLASH_COMP 32
176#define IMAGE_TYPE_FIRMWARE 160
177#define IMAGE_TYPE_BOOTCODE 224
178#define IMAGE_TYPE_OPTIONROM 32
179
180#define NUM_FLASHDIR_ENTRIES 32
181
182#define OPTYPE_ISCSI_ACTIVE 0
183#define OPTYPE_REDBOOT 1
184#define OPTYPE_BIOS 2
185#define OPTYPE_PXE_BIOS 3
186#define OPTYPE_FCOE_BIOS 8
187#define OPTYPE_ISCSI_BACKUP 9
188#define OPTYPE_FCOE_FW_ACTIVE 10
189#define OPTYPE_FCOE_FW_BACKUP 11
190#define OPTYPE_NCSI_FW 13
191#define OPTYPE_REDBOOT_DIR 18
192#define OPTYPE_REDBOOT_CONFIG 19
193#define OPTYPE_SH_PHY_FW 21
194#define OPTYPE_FLASHISM_JUMPVECTOR 22
195#define OPTYPE_UFI_DIR 23
196#define OPTYPE_PHY_FW 99
197#define TN_8022 13
198
199#define FLASHROM_OPER_PHY_FLASH 9
200#define FLASHROM_OPER_PHY_SAVE 10
201#define FLASHROM_OPER_FLASH 1
202#define FLASHROM_OPER_SAVE 2
203#define FLASHROM_OPER_REPORT 4
204
205#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image size */
206#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM image sz */
207#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
208#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max firmware image size */
209#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM image sz */
210#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
211#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144)
212#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144
213
214#define FLASH_NCSI_MAGIC (0x16032009)
215#define FLASH_NCSI_DISABLED (0)
216#define FLASH_NCSI_ENABLED (1)
217
218#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
219
220/* Offsets for components on Flash. */
221#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
222#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
223#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
224#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
225#define FLASH_iSCSI_BIOS_START_g2 (7340032)
226#define FLASH_PXE_BIOS_START_g2 (7864320)
227#define FLASH_FCoE_BIOS_START_g2 (524288)
228#define FLASH_REDBOOT_START_g2 (0)
229
230#define FLASH_NCSI_START_g3 (15990784)
231#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
232#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
233#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
234#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
235#define FLASH_iSCSI_BIOS_START_g3 (12582912)
236#define FLASH_PXE_BIOS_START_g3 (13107200)
237#define FLASH_FCoE_BIOS_START_g3 (13631488)
238#define FLASH_REDBOOT_START_g3 (262144)
239#define FLASH_PHY_FW_START_g3 1310720
240
241#define IMAGE_NCSI 16
242#define IMAGE_OPTION_ROM_PXE 32
243#define IMAGE_OPTION_ROM_FCoE 33
244#define IMAGE_OPTION_ROM_ISCSI 34
245#define IMAGE_FLASHISM_JUMPVECTOR 48
246#define IMAGE_FLASH_ISM 49
247#define IMAGE_JUMP_VECTOR 50
248#define IMAGE_FIRMWARE_iSCSI 160
249#define IMAGE_FIRMWARE_COMP_iSCSI 161
250#define IMAGE_FIRMWARE_FCoE 162
251#define IMAGE_FIRMWARE_COMP_FCoE 163
252#define IMAGE_FIRMWARE_BACKUP_iSCSI 176
253#define IMAGE_FIRMWARE_BACKUP_COMP_iSCSI 177
254#define IMAGE_FIRMWARE_BACKUP_FCoE 178
255#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179
256#define IMAGE_FIRMWARE_PHY 192
257#define IMAGE_REDBOOT_DIR 208
258#define IMAGE_REDBOOT_CONFIG 209
259#define IMAGE_UFI_DIR 210
260#define IMAGE_BOOT_CODE 224
261
262/************* Rx Packet Type Encoding **************/ 174/************* Rx Packet Type Encoding **************/
263#define BE_UNICAST_PACKET 0 175#define BE_UNICAST_PACKET 0
264#define BE_MULTICAST_PACKET 1 176#define BE_MULTICAST_PACKET 1
@@ -281,10 +193,10 @@ struct be_eq_entry {
281/* TX Queue Descriptor */ 193/* TX Queue Descriptor */
282#define ETH_WRB_FRAG_LEN_MASK 0xFFFF 194#define ETH_WRB_FRAG_LEN_MASK 0xFFFF
283struct be_eth_wrb { 195struct be_eth_wrb {
284 u32 frag_pa_hi; /* dword 0 */ 196 __le32 frag_pa_hi; /* dword 0 */
285 u32 frag_pa_lo; /* dword 1 */ 197 __le32 frag_pa_lo; /* dword 1 */
286 u32 rsvd0; /* dword 2 */ 198 u32 rsvd0; /* dword 2 */
287 u32 frag_len; /* dword 3: bits 0 - 15 */ 199 __le32 frag_len; /* dword 3: bits 0 - 15 */
288} __packed; 200} __packed;
289 201
290/* Pseudo amap definition for eth_hdr_wrb in which each bit of the 202/* Pseudo amap definition for eth_hdr_wrb in which each bit of the
@@ -311,8 +223,13 @@ struct amap_eth_hdr_wrb {
311 u8 vlan_tag[16]; 223 u8 vlan_tag[16];
312} __packed; 224} __packed;
313 225
226#define TX_HDR_WRB_COMPL 1 /* word 2 */
227#define TX_HDR_WRB_EVT BIT(1) /* word 2 */
228#define TX_HDR_WRB_NUM_SHIFT 13 /* word 2: bits 13:17 */
229#define TX_HDR_WRB_NUM_MASK 0x1F /* word 2: bits 13:17 */
230
314struct be_eth_hdr_wrb { 231struct be_eth_hdr_wrb {
315 u32 dw[4]; 232 __le32 dw[4];
316}; 233};
317 234
318/********* Tx Compl Status Encoding *********/ 235/********* Tx Compl Status Encoding *********/
@@ -435,138 +352,3 @@ struct amap_eth_rx_compl_v1 {
435struct be_eth_rx_compl { 352struct be_eth_rx_compl {
436 u32 dw[4]; 353 u32 dw[4];
437}; 354};
438
439struct mgmt_hba_attribs {
440 u8 flashrom_version_string[32];
441 u8 manufacturer_name[32];
442 u32 supported_modes;
443 u32 rsvd0[3];
444 u8 ncsi_ver_string[12];
445 u32 default_extended_timeout;
446 u8 controller_model_number[32];
447 u8 controller_description[64];
448 u8 controller_serial_number[32];
449 u8 ip_version_string[32];
450 u8 firmware_version_string[32];
451 u8 bios_version_string[32];
452 u8 redboot_version_string[32];
453 u8 driver_version_string[32];
454 u8 fw_on_flash_version_string[32];
455 u32 functionalities_supported;
456 u16 max_cdblength;
457 u8 asic_revision;
458 u8 generational_guid[16];
459 u8 hba_port_count;
460 u16 default_link_down_timeout;
461 u8 iscsi_ver_min_max;
462 u8 multifunction_device;
463 u8 cache_valid;
464 u8 hba_status;
465 u8 max_domains_supported;
466 u8 phy_port;
467 u32 firmware_post_status;
468 u32 hba_mtu[8];
469 u32 rsvd1[4];
470};
471
472struct mgmt_controller_attrib {
473 struct mgmt_hba_attribs hba_attribs;
474 u16 pci_vendor_id;
475 u16 pci_device_id;
476 u16 pci_sub_vendor_id;
477 u16 pci_sub_system_id;
478 u8 pci_bus_number;
479 u8 pci_device_number;
480 u8 pci_function_number;
481 u8 interface_type;
482 u64 unique_identifier;
483 u32 rsvd0[5];
484};
485
486struct controller_id {
487 u32 vendor;
488 u32 device;
489 u32 subvendor;
490 u32 subdevice;
491};
492
493struct flash_comp {
494 unsigned long offset;
495 int optype;
496 int size;
497 int img_type;
498};
499
500struct image_hdr {
501 u32 imageid;
502 u32 imageoffset;
503 u32 imagelength;
504 u32 image_checksum;
505 u8 image_version[32];
506};
507struct flash_file_hdr_g2 {
508 u8 sign[32];
509 u32 cksum;
510 u32 antidote;
511 struct controller_id cont_id;
512 u32 file_len;
513 u32 chunk_num;
514 u32 total_chunks;
515 u32 num_imgs;
516 u8 build[24];
517};
518
519struct flash_file_hdr_g3 {
520 u8 sign[52];
521 u8 ufi_version[4];
522 u32 file_len;
523 u32 cksum;
524 u32 antidote;
525 u32 num_imgs;
526 u8 build[24];
527 u8 asic_type_rev;
528 u8 rsvd[31];
529};
530
531struct flash_section_hdr {
532 u32 format_rev;
533 u32 cksum;
534 u32 antidote;
535 u32 num_images;
536 u8 id_string[128];
537 u32 rsvd[4];
538} __packed;
539
540struct flash_section_hdr_g2 {
541 u32 format_rev;
542 u32 cksum;
543 u32 antidote;
544 u32 build_num;
545 u8 id_string[128];
546 u32 rsvd[8];
547} __packed;
548
549struct flash_section_entry {
550 u32 type;
551 u32 offset;
552 u32 pad_size;
553 u32 image_size;
554 u32 cksum;
555 u32 entry_point;
556 u16 optype;
557 u16 rsvd0;
558 u32 rsvd1;
559 u8 ver_data[32];
560} __packed;
561
562struct flash_section_info {
563 u8 cookie[32];
564 struct flash_section_hdr fsec_hdr;
565 struct flash_section_entry fsec_entry[32];
566} __packed;
567
568struct flash_section_info_g2 {
569 u8 cookie[32];
570 struct flash_section_hdr_g2 fsec_hdr;
571 struct flash_section_entry fsec_entry[32];
572} __packed;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d48806b5cd88..932b93a14965 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -662,48 +662,40 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
662 netif_carrier_off(netdev); 662 netif_carrier_off(netdev);
663} 663}
664 664
665static void be_tx_stats_update(struct be_tx_obj *txo, 665static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
666 u32 wrb_cnt, u32 copied, u32 gso_segs,
667 bool stopped)
668{ 666{
669 struct be_tx_stats *stats = tx_stats(txo); 667 struct be_tx_stats *stats = tx_stats(txo);
670 668
671 u64_stats_update_begin(&stats->sync); 669 u64_stats_update_begin(&stats->sync);
672 stats->tx_reqs++; 670 stats->tx_reqs++;
673 stats->tx_wrbs += wrb_cnt; 671 stats->tx_bytes += skb->len;
674 stats->tx_bytes += copied; 672 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
675 stats->tx_pkts += (gso_segs ? gso_segs : 1);
676 if (stopped)
677 stats->tx_stops++;
678 u64_stats_update_end(&stats->sync); 673 u64_stats_update_end(&stats->sync);
679} 674}
680 675
681/* Determine number of WRB entries needed to xmit data in an skb */ 676/* Returns number of WRBs needed for the skb */
682static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb, 677static u32 skb_wrb_cnt(struct sk_buff *skb)
683 bool *dummy)
684{ 678{
685 int cnt = (skb->len > skb->data_len); 679 /* +1 for the header wrb */
686 680 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
687 cnt += skb_shinfo(skb)->nr_frags;
688
689 /* to account for hdr wrb */
690 cnt++;
691 if (lancer_chip(adapter) || !(cnt & 1)) {
692 *dummy = false;
693 } else {
694 /* add a dummy to make it an even num */
695 cnt++;
696 *dummy = true;
697 }
698 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
699 return cnt;
700} 681}
701 682
702static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len) 683static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
703{ 684{
704 wrb->frag_pa_hi = upper_32_bits(addr); 685 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
705 wrb->frag_pa_lo = addr & 0xFFFFFFFF; 686 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
706 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK; 687 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
688 wrb->rsvd0 = 0;
689}
690
691/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
692 * to avoid the swap and shift/mask operations in wrb_fill().
693 */
694static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
695{
696 wrb->frag_pa_hi = 0;
697 wrb->frag_pa_lo = 0;
698 wrb->frag_len = 0;
707 wrb->rsvd0 = 0; 699 wrb->rsvd0 = 0;
708} 700}
709 701
@@ -713,7 +705,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
713 u8 vlan_prio; 705 u8 vlan_prio;
714 u16 vlan_tag; 706 u16 vlan_tag;
715 707
716 vlan_tag = vlan_tx_tag_get(skb); 708 vlan_tag = skb_vlan_tag_get(skb);
717 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 709 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
718 /* If vlan priority provided by OS is NOT in available bmap */ 710 /* If vlan priority provided by OS is NOT in available bmap */
719 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio))) 711 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
@@ -764,52 +756,57 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
764 SET_TX_WRB_HDR_BITS(udpcs, hdr, 1); 756 SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
765 } 757 }
766 758
767 if (vlan_tx_tag_present(skb)) { 759 if (skb_vlan_tag_present(skb)) {
768 SET_TX_WRB_HDR_BITS(vlan, hdr, 1); 760 SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
769 vlan_tag = be_get_tx_vlan_tag(adapter, skb); 761 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
770 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag); 762 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
771 } 763 }
772 764
773 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
774 SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
775 SET_TX_WRB_HDR_BITS(event, hdr, 1);
776 SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt); 765 SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
777 SET_TX_WRB_HDR_BITS(len, hdr, len); 766 SET_TX_WRB_HDR_BITS(len, hdr, len);
767
768 /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
769 * When this hack is not needed, the evt bit is set while ringing DB
770 */
771 if (skip_hw_vlan)
772 SET_TX_WRB_HDR_BITS(event, hdr, 1);
778} 773}
779 774
780static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, 775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
781 bool unmap_single) 776 bool unmap_single)
782{ 777{
783 dma_addr_t dma; 778 dma_addr_t dma;
779 u32 frag_len = le32_to_cpu(wrb->frag_len);
784 780
785 be_dws_le_to_cpu(wrb, sizeof(*wrb));
786 781
787 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo; 782 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
788 if (wrb->frag_len) { 783 (u64)le32_to_cpu(wrb->frag_pa_lo);
784 if (frag_len) {
789 if (unmap_single) 785 if (unmap_single)
790 dma_unmap_single(dev, dma, wrb->frag_len, 786 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
791 DMA_TO_DEVICE);
792 else 787 else
793 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE); 788 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
794 } 789 }
795} 790}
796 791
797static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, 792/* Returns the number of WRBs used up by the skb */
798 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb, 793static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
799 bool skip_hw_vlan) 794 struct sk_buff *skb, bool skip_hw_vlan)
800{ 795{
801 dma_addr_t busaddr; 796 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
802 int i, copied = 0;
803 struct device *dev = &adapter->pdev->dev; 797 struct device *dev = &adapter->pdev->dev;
804 struct sk_buff *first_skb = skb; 798 struct be_queue_info *txq = &txo->q;
805 struct be_eth_wrb *wrb;
806 struct be_eth_hdr_wrb *hdr; 799 struct be_eth_hdr_wrb *hdr;
807 bool map_single = false; 800 bool map_single = false;
808 u16 map_head; 801 struct be_eth_wrb *wrb;
802 dma_addr_t busaddr;
803 u16 head = txq->head;
809 804
810 hdr = queue_head_node(txq); 805 hdr = queue_head_node(txq);
806 wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
807 be_dws_cpu_to_le(hdr, sizeof(*hdr));
808
811 queue_head_inc(txq); 809 queue_head_inc(txq);
812 map_head = txq->head;
813 810
814 if (skb->len > skb->data_len) { 811 if (skb->len > skb->data_len) {
815 int len = skb_headlen(skb); 812 int len = skb_headlen(skb);
@@ -820,7 +817,6 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
820 map_single = true; 817 map_single = true;
821 wrb = queue_head_node(txq); 818 wrb = queue_head_node(txq);
822 wrb_fill(wrb, busaddr, len); 819 wrb_fill(wrb, busaddr, len);
823 be_dws_cpu_to_le(wrb, sizeof(*wrb));
824 queue_head_inc(txq); 820 queue_head_inc(txq);
825 copied += len; 821 copied += len;
826 } 822 }
@@ -834,35 +830,44 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
834 goto dma_err; 830 goto dma_err;
835 wrb = queue_head_node(txq); 831 wrb = queue_head_node(txq);
836 wrb_fill(wrb, busaddr, skb_frag_size(frag)); 832 wrb_fill(wrb, busaddr, skb_frag_size(frag));
837 be_dws_cpu_to_le(wrb, sizeof(*wrb));
838 queue_head_inc(txq); 833 queue_head_inc(txq);
839 copied += skb_frag_size(frag); 834 copied += skb_frag_size(frag);
840 } 835 }
841 836
842 if (dummy_wrb) { 837 BUG_ON(txo->sent_skb_list[head]);
843 wrb = queue_head_node(txq); 838 txo->sent_skb_list[head] = skb;
844 wrb_fill(wrb, 0, 0); 839 txo->last_req_hdr = head;
845 be_dws_cpu_to_le(wrb, sizeof(*wrb)); 840 atomic_add(wrb_cnt, &txq->used);
846 queue_head_inc(txq); 841 txo->last_req_wrb_cnt = wrb_cnt;
847 } 842 txo->pend_wrb_cnt += wrb_cnt;
848 843
849 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan); 844 be_tx_stats_update(txo, skb);
850 be_dws_cpu_to_le(hdr, sizeof(*hdr)); 845 return wrb_cnt;
851 846
852 return copied;
853dma_err: 847dma_err:
854 txq->head = map_head; 848 /* Bring the queue back to the state it was in before this
849 * routine was invoked.
850 */
851 txq->head = head;
852 /* skip the first wrb (hdr); it's not mapped */
853 queue_head_inc(txq);
855 while (copied) { 854 while (copied) {
856 wrb = queue_head_node(txq); 855 wrb = queue_head_node(txq);
857 unmap_tx_frag(dev, wrb, map_single); 856 unmap_tx_frag(dev, wrb, map_single);
858 map_single = false; 857 map_single = false;
859 copied -= wrb->frag_len; 858 copied -= le32_to_cpu(wrb->frag_len);
860 adapter->drv_stats.dma_map_errors++; 859 adapter->drv_stats.dma_map_errors++;
861 queue_head_inc(txq); 860 queue_head_inc(txq);
862 } 861 }
862 txq->head = head;
863 return 0; 863 return 0;
864} 864}
865 865
866static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
867{
868 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
869}
870
866static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, 871static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
867 struct sk_buff *skb, 872 struct sk_buff *skb,
868 bool *skip_hw_vlan) 873 bool *skip_hw_vlan)
@@ -873,7 +878,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
873 if (unlikely(!skb)) 878 if (unlikely(!skb))
874 return skb; 879 return skb;
875 880
876 if (vlan_tx_tag_present(skb)) 881 if (skb_vlan_tag_present(skb))
877 vlan_tag = be_get_tx_vlan_tag(adapter, skb); 882 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
878 883
879 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) { 884 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
@@ -932,7 +937,7 @@ static bool be_ipv6_exthdr_check(struct sk_buff *skb)
932 937
933static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb) 938static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
934{ 939{
935 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid; 940 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
936} 941}
937 942
938static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) 943static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
@@ -955,7 +960,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
955 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? 960 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
956 VLAN_ETH_HLEN : ETH_HLEN; 961 VLAN_ETH_HLEN : ETH_HLEN;
957 if (skb->len <= 60 && 962 if (skb->len <= 60 &&
958 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) && 963 (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
959 is_ipv4_pkt(skb)) { 964 is_ipv4_pkt(skb)) {
960 ip = (struct iphdr *)ip_hdr(skb); 965 ip = (struct iphdr *)ip_hdr(skb);
961 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); 966 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
@@ -973,7 +978,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
973 * Manually insert VLAN in pkt. 978 * Manually insert VLAN in pkt.
974 */ 979 */
975 if (skb->ip_summed != CHECKSUM_PARTIAL && 980 if (skb->ip_summed != CHECKSUM_PARTIAL &&
976 vlan_tx_tag_present(skb)) { 981 skb_vlan_tag_present(skb)) {
977 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); 982 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
978 if (unlikely(!skb)) 983 if (unlikely(!skb))
979 goto err; 984 goto err;
@@ -1030,52 +1035,64 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1030 return skb; 1035 return skb;
1031} 1036}
1032 1037
1038static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1039{
1040 struct be_queue_info *txq = &txo->q;
1041 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1042
1043 /* Mark the last request eventable if it hasn't been marked already */
1044 if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1045 hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1046
1047 /* compose a dummy wrb if there are odd set of wrbs to notify */
1048 if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
1049 wrb_fill_dummy(queue_head_node(txq));
1050 queue_head_inc(txq);
1051 atomic_inc(&txq->used);
1052 txo->pend_wrb_cnt++;
1053 hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1054 TX_HDR_WRB_NUM_SHIFT);
1055 hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1056 TX_HDR_WRB_NUM_SHIFT);
1057 }
1058 be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1059 txo->pend_wrb_cnt = 0;
1060}
1061
1033static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) 1062static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1034{ 1063{
1064 bool skip_hw_vlan = false, flush = !skb->xmit_more;
1035 struct be_adapter *adapter = netdev_priv(netdev); 1065 struct be_adapter *adapter = netdev_priv(netdev);
1036 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; 1066 u16 q_idx = skb_get_queue_mapping(skb);
1067 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
1037 struct be_queue_info *txq = &txo->q; 1068 struct be_queue_info *txq = &txo->q;
1038 bool dummy_wrb, stopped = false; 1069 u16 wrb_cnt;
1039 u32 wrb_cnt = 0, copied = 0;
1040 bool skip_hw_vlan = false;
1041 u32 start = txq->head;
1042 1070
1043 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan); 1071 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
1044 if (!skb) { 1072 if (unlikely(!skb))
1045 tx_stats(txo)->tx_drv_drops++; 1073 goto drop;
1046 return NETDEV_TX_OK;
1047 }
1048
1049 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
1050 1074
1051 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb, 1075 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
1052 skip_hw_vlan); 1076 if (unlikely(!wrb_cnt)) {
1053 if (copied) { 1077 dev_kfree_skb_any(skb);
1054 int gso_segs = skb_shinfo(skb)->gso_segs; 1078 goto drop;
1079 }
1055 1080
1056 /* record the sent skb in the sent_skb table */ 1081 if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
1057 BUG_ON(txo->sent_skb_list[start]); 1082 netif_stop_subqueue(netdev, q_idx);
1058 txo->sent_skb_list[start] = skb; 1083 tx_stats(txo)->tx_stops++;
1084 }
1059 1085
1060 /* Ensure txq has space for the next skb; Else stop the queue 1086 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1061 * *BEFORE* ringing the tx doorbell, so that we serialze the 1087 be_xmit_flush(adapter, txo);
1062 * tx compls of the current transmit which'll wake up the queue
1063 */
1064 atomic_add(wrb_cnt, &txq->used);
1065 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1066 txq->len) {
1067 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
1068 stopped = true;
1069 }
1070 1088
1071 be_txq_notify(adapter, txo, wrb_cnt); 1089 return NETDEV_TX_OK;
1090drop:
1091 tx_stats(txo)->tx_drv_drops++;
1092 /* Flush the already enqueued tx requests */
1093 if (flush && txo->pend_wrb_cnt)
1094 be_xmit_flush(adapter, txo);
1072 1095
1073 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
1074 } else {
1075 txq->head = start;
1076 tx_stats(txo)->tx_drv_drops++;
1077 dev_kfree_skb_any(skb);
1078 }
1079 return NETDEV_TX_OK; 1096 return NETDEV_TX_OK;
1080} 1097}
1081 1098
@@ -1096,6 +1113,43 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
1096 return 0; 1113 return 0;
1097} 1114}
1098 1115
1116static inline bool be_in_all_promisc(struct be_adapter *adapter)
1117{
1118 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1119 BE_IF_FLAGS_ALL_PROMISCUOUS;
1120}
1121
1122static int be_set_vlan_promisc(struct be_adapter *adapter)
1123{
1124 struct device *dev = &adapter->pdev->dev;
1125 int status;
1126
1127 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1128 return 0;
1129
1130 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1131 if (!status) {
1132 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1133 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1134 } else {
1135 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1136 }
1137 return status;
1138}
1139
1140static int be_clear_vlan_promisc(struct be_adapter *adapter)
1141{
1142 struct device *dev = &adapter->pdev->dev;
1143 int status;
1144
1145 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1146 if (!status) {
1147 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1148 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1149 }
1150 return status;
1151}
1152
1099/* 1153/*
1100 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE. 1154 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1101 * If the user configures more, place BE in vlan promiscuous mode. 1155 * If the user configures more, place BE in vlan promiscuous mode.
@@ -1108,11 +1162,11 @@ static int be_vid_config(struct be_adapter *adapter)
1108 int status = 0; 1162 int status = 0;
1109 1163
1110 /* No need to further configure vids if in promiscuous mode */ 1164 /* No need to further configure vids if in promiscuous mode */
1111 if (adapter->promiscuous) 1165 if (be_in_all_promisc(adapter))
1112 return 0; 1166 return 0;
1113 1167
1114 if (adapter->vlans_added > be_max_vlans(adapter)) 1168 if (adapter->vlans_added > be_max_vlans(adapter))
1115 goto set_vlan_promisc; 1169 return be_set_vlan_promisc(adapter);
1116 1170
1117 /* Construct VLAN Table to give to HW */ 1171 /* Construct VLAN Table to give to HW */
1118 for_each_set_bit(i, adapter->vids, VLAN_N_VID) 1172 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
@@ -1120,36 +1174,14 @@ static int be_vid_config(struct be_adapter *adapter)
1120 1174
1121 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); 1175 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
1122 if (status) { 1176 if (status) {
1177 dev_err(dev, "Setting HW VLAN filtering failed\n");
1123 /* Set to VLAN promisc mode as setting VLAN filter failed */ 1178 /* Set to VLAN promisc mode as setting VLAN filter failed */
1124 if (addl_status(status) == 1179 if (addl_status(status) ==
1125 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES) 1180 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
1126 goto set_vlan_promisc; 1181 return be_set_vlan_promisc(adapter);
1127 dev_err(dev, "Setting HW VLAN filtering failed\n"); 1182 } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1128 } else { 1183 status = be_clear_vlan_promisc(adapter);
1129 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1130 /* hw VLAN filtering re-enabled. */
1131 status = be_cmd_rx_filter(adapter,
1132 BE_FLAGS_VLAN_PROMISC, OFF);
1133 if (!status) {
1134 dev_info(dev,
1135 "Disabling VLAN Promiscuous mode\n");
1136 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1137 }
1138 }
1139 } 1184 }
1140
1141 return status;
1142
1143set_vlan_promisc:
1144 if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
1145 return 0;
1146
1147 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1148 if (!status) {
1149 dev_info(dev, "Enable VLAN Promiscuous mode\n");
1150 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1151 } else
1152 dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
1153 return status; 1185 return status;
1154} 1186}
1155 1187
@@ -1191,79 +1223,99 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1191 return be_vid_config(adapter); 1223 return be_vid_config(adapter);
1192} 1224}
1193 1225
1194static void be_clear_promisc(struct be_adapter *adapter) 1226static void be_clear_all_promisc(struct be_adapter *adapter)
1195{ 1227{
1196 adapter->promiscuous = false; 1228 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
1197 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC); 1229 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1230}
1198 1231
1199 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); 1232static void be_set_all_promisc(struct be_adapter *adapter)
1233{
1234 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1235 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1200} 1236}
1201 1237
1202static void be_set_rx_mode(struct net_device *netdev) 1238static void be_set_mc_promisc(struct be_adapter *adapter)
1203{ 1239{
1204 struct be_adapter *adapter = netdev_priv(netdev);
1205 int status; 1240 int status;
1206 1241
1207 if (netdev->flags & IFF_PROMISC) { 1242 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1208 be_cmd_rx_filter(adapter, IFF_PROMISC, ON); 1243 return;
1209 adapter->promiscuous = true;
1210 goto done;
1211 }
1212 1244
1213 /* BE was previously in promiscuous mode; disable it */ 1245 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1214 if (adapter->promiscuous) { 1246 if (!status)
1215 be_clear_promisc(adapter); 1247 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1216 if (adapter->vlans_added) 1248}
1217 be_vid_config(adapter); 1249
1250static void be_set_mc_list(struct be_adapter *adapter)
1251{
1252 int status;
1253
1254 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1255 if (!status)
1256 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1257 else
1258 be_set_mc_promisc(adapter);
1259}
1260
1261static void be_set_uc_list(struct be_adapter *adapter)
1262{
1263 struct netdev_hw_addr *ha;
1264 int i = 1; /* First slot is claimed by the Primary MAC */
1265
1266 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
1267 be_cmd_pmac_del(adapter, adapter->if_handle,
1268 adapter->pmac_id[i], 0);
1269
1270 if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
1271 be_set_all_promisc(adapter);
1272 return;
1218 } 1273 }
1219 1274
1220 /* Enable multicast promisc if num configured exceeds what we support */ 1275 netdev_for_each_uc_addr(ha, adapter->netdev) {
1221 if (netdev->flags & IFF_ALLMULTI || 1276 adapter->uc_macs++; /* First slot is for Primary MAC */
1222 netdev_mc_count(netdev) > be_max_mc(adapter)) 1277 be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
1223 goto set_mcast_promisc; 1278 &adapter->pmac_id[adapter->uc_macs], 0);
1279 }
1280}
1224 1281
1225 if (netdev_uc_count(netdev) != adapter->uc_macs) { 1282static void be_clear_uc_list(struct be_adapter *adapter)
1226 struct netdev_hw_addr *ha; 1283{
1227 int i = 1; /* First slot is claimed by the Primary MAC */ 1284 int i;
1228 1285
1229 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) { 1286 for (i = 1; i < (adapter->uc_macs + 1); i++)
1230 be_cmd_pmac_del(adapter, adapter->if_handle, 1287 be_cmd_pmac_del(adapter, adapter->if_handle,
1231 adapter->pmac_id[i], 0); 1288 adapter->pmac_id[i], 0);
1232 } 1289 adapter->uc_macs = 0;
1290}
1233 1291
1234 if (netdev_uc_count(netdev) > be_max_uc(adapter)) { 1292static void be_set_rx_mode(struct net_device *netdev)
1235 be_cmd_rx_filter(adapter, IFF_PROMISC, ON); 1293{
1236 adapter->promiscuous = true; 1294 struct be_adapter *adapter = netdev_priv(netdev);
1237 goto done;
1238 }
1239 1295
1240 netdev_for_each_uc_addr(ha, adapter->netdev) { 1296 if (netdev->flags & IFF_PROMISC) {
1241 adapter->uc_macs++; /* First slot is for Primary MAC */ 1297 be_set_all_promisc(adapter);
1242 be_cmd_pmac_add(adapter, (u8 *)ha->addr, 1298 return;
1243 adapter->if_handle,
1244 &adapter->pmac_id[adapter->uc_macs], 0);
1245 }
1246 } 1299 }
1247 1300
1248 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON); 1301 /* Interface was previously in promiscuous mode; disable it */
1249 if (!status) { 1302 if (be_in_all_promisc(adapter)) {
1250 if (adapter->flags & BE_FLAGS_MCAST_PROMISC) 1303 be_clear_all_promisc(adapter);
1251 adapter->flags &= ~BE_FLAGS_MCAST_PROMISC; 1304 if (adapter->vlans_added)
1252 goto done; 1305 be_vid_config(adapter);
1253 } 1306 }
1254 1307
1255set_mcast_promisc: 1308 /* Enable multicast promisc if num configured exceeds what we support */
1256 if (adapter->flags & BE_FLAGS_MCAST_PROMISC) 1309 if (netdev->flags & IFF_ALLMULTI ||
1310 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1311 be_set_mc_promisc(adapter);
1257 return; 1312 return;
1313 }
1258 1314
1259 /* Set to MCAST promisc mode if setting MULTICAST address fails 1315 if (netdev_uc_count(netdev) != adapter->uc_macs)
1260 * or if num configured exceeds what we support 1316 be_set_uc_list(adapter);
1261 */ 1317
1262 status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); 1318 be_set_mc_list(adapter);
1263 if (!status)
1264 adapter->flags |= BE_FLAGS_MCAST_PROMISC;
1265done:
1266 return;
1267} 1319}
1268 1320
1269static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 1321static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
@@ -1959,32 +2011,34 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1959static u16 be_tx_compl_process(struct be_adapter *adapter, 2011static u16 be_tx_compl_process(struct be_adapter *adapter,
1960 struct be_tx_obj *txo, u16 last_index) 2012 struct be_tx_obj *txo, u16 last_index)
1961{ 2013{
2014 struct sk_buff **sent_skbs = txo->sent_skb_list;
1962 struct be_queue_info *txq = &txo->q; 2015 struct be_queue_info *txq = &txo->q;
2016 u16 frag_index, num_wrbs = 0;
2017 struct sk_buff *skb = NULL;
2018 bool unmap_skb_hdr = false;
1963 struct be_eth_wrb *wrb; 2019 struct be_eth_wrb *wrb;
1964 struct sk_buff **sent_skbs = txo->sent_skb_list;
1965 struct sk_buff *sent_skb;
1966 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1967 bool unmap_skb_hdr = true;
1968
1969 sent_skb = sent_skbs[txq->tail];
1970 BUG_ON(!sent_skb);
1971 sent_skbs[txq->tail] = NULL;
1972
1973 /* skip header wrb */
1974 queue_tail_inc(txq);
1975 2020
1976 do { 2021 do {
1977 cur_index = txq->tail; 2022 if (sent_skbs[txq->tail]) {
2023 /* Free skb from prev req */
2024 if (skb)
2025 dev_consume_skb_any(skb);
2026 skb = sent_skbs[txq->tail];
2027 sent_skbs[txq->tail] = NULL;
2028 queue_tail_inc(txq); /* skip hdr wrb */
2029 num_wrbs++;
2030 unmap_skb_hdr = true;
2031 }
1978 wrb = queue_tail_node(txq); 2032 wrb = queue_tail_node(txq);
2033 frag_index = txq->tail;
1979 unmap_tx_frag(&adapter->pdev->dev, wrb, 2034 unmap_tx_frag(&adapter->pdev->dev, wrb,
1980 (unmap_skb_hdr && skb_headlen(sent_skb))); 2035 (unmap_skb_hdr && skb_headlen(skb)));
1981 unmap_skb_hdr = false; 2036 unmap_skb_hdr = false;
1982
1983 num_wrbs++;
1984 queue_tail_inc(txq); 2037 queue_tail_inc(txq);
1985 } while (cur_index != last_index); 2038 num_wrbs++;
2039 } while (frag_index != last_index);
2040 dev_consume_skb_any(skb);
1986 2041
1987 dev_consume_skb_any(sent_skb);
1988 return num_wrbs; 2042 return num_wrbs;
1989} 2043}
1990 2044
@@ -2068,12 +2122,11 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
2068 2122
2069static void be_tx_compl_clean(struct be_adapter *adapter) 2123static void be_tx_compl_clean(struct be_adapter *adapter)
2070{ 2124{
2125 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2126 struct device *dev = &adapter->pdev->dev;
2071 struct be_tx_obj *txo; 2127 struct be_tx_obj *txo;
2072 struct be_queue_info *txq; 2128 struct be_queue_info *txq;
2073 struct be_eth_tx_compl *txcp; 2129 struct be_eth_tx_compl *txcp;
2074 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2075 struct sk_buff *sent_skb;
2076 bool dummy_wrb;
2077 int i, pending_txqs; 2130 int i, pending_txqs;
2078 2131
2079 /* Stop polling for compls when HW has been silent for 10ms */ 2132 /* Stop polling for compls when HW has been silent for 10ms */
@@ -2095,7 +2148,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
2095 atomic_sub(num_wrbs, &txq->used); 2148 atomic_sub(num_wrbs, &txq->used);
2096 timeo = 0; 2149 timeo = 0;
2097 } 2150 }
2098 if (atomic_read(&txq->used) == 0) 2151 if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
2099 pending_txqs--; 2152 pending_txqs--;
2100 } 2153 }
2101 2154
@@ -2105,21 +2158,29 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
2105 mdelay(1); 2158 mdelay(1);
2106 } while (true); 2159 } while (true);
2107 2160
2161 /* Free enqueued TX that was never notified to HW */
2108 for_all_tx_queues(adapter, txo, i) { 2162 for_all_tx_queues(adapter, txo, i) {
2109 txq = &txo->q; 2163 txq = &txo->q;
2110 if (atomic_read(&txq->used))
2111 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2112 atomic_read(&txq->used));
2113 2164
2114 /* free posted tx for which compls will never arrive */ 2165 if (atomic_read(&txq->used)) {
2115 while (atomic_read(&txq->used)) { 2166 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2116 sent_skb = txo->sent_skb_list[txq->tail]; 2167 i, atomic_read(&txq->used));
2168 notified_idx = txq->tail;
2117 end_idx = txq->tail; 2169 end_idx = txq->tail;
2118 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb, 2170 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2119 &dummy_wrb); 2171 txq->len);
2120 index_adv(&end_idx, num_wrbs - 1, txq->len); 2172 /* Use the tx-compl process logic to handle requests
2173 * that were not sent to the HW.
2174 */
2121 num_wrbs = be_tx_compl_process(adapter, txo, end_idx); 2175 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2122 atomic_sub(num_wrbs, &txq->used); 2176 atomic_sub(num_wrbs, &txq->used);
2177 BUG_ON(atomic_read(&txq->used));
2178 txo->pend_wrb_cnt = 0;
2179 /* Since hw was never notified of these requests,
2180 * reset TXQ indices
2181 */
2182 txq->head = notified_idx;
2183 txq->tail = notified_idx;
2123 } 2184 }
2124 } 2185 }
2125} 2186}
@@ -2514,6 +2575,106 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2514 } 2575 }
2515} 2576}
2516 2577
2578#ifdef CONFIG_NET_RX_BUSY_POLL
2579static inline bool be_lock_napi(struct be_eq_obj *eqo)
2580{
2581 bool status = true;
2582
2583 spin_lock(&eqo->lock); /* BH is already disabled */
2584 if (eqo->state & BE_EQ_LOCKED) {
2585 WARN_ON(eqo->state & BE_EQ_NAPI);
2586 eqo->state |= BE_EQ_NAPI_YIELD;
2587 status = false;
2588 } else {
2589 eqo->state = BE_EQ_NAPI;
2590 }
2591 spin_unlock(&eqo->lock);
2592 return status;
2593}
2594
2595static inline void be_unlock_napi(struct be_eq_obj *eqo)
2596{
2597 spin_lock(&eqo->lock); /* BH is already disabled */
2598
2599 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2600 eqo->state = BE_EQ_IDLE;
2601
2602 spin_unlock(&eqo->lock);
2603}
2604
2605static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2606{
2607 bool status = true;
2608
2609 spin_lock_bh(&eqo->lock);
2610 if (eqo->state & BE_EQ_LOCKED) {
2611 eqo->state |= BE_EQ_POLL_YIELD;
2612 status = false;
2613 } else {
2614 eqo->state |= BE_EQ_POLL;
2615 }
2616 spin_unlock_bh(&eqo->lock);
2617 return status;
2618}
2619
2620static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2621{
2622 spin_lock_bh(&eqo->lock);
2623
2624 WARN_ON(eqo->state & (BE_EQ_NAPI));
2625 eqo->state = BE_EQ_IDLE;
2626
2627 spin_unlock_bh(&eqo->lock);
2628}
2629
2630static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2631{
2632 spin_lock_init(&eqo->lock);
2633 eqo->state = BE_EQ_IDLE;
2634}
2635
2636static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2637{
2638 local_bh_disable();
2639
2640 /* It's enough to just acquire napi lock on the eqo to stop
2641 * be_busy_poll() from processing any queueus.
2642 */
2643 while (!be_lock_napi(eqo))
2644 mdelay(1);
2645
2646 local_bh_enable();
2647}
2648
2649#else /* CONFIG_NET_RX_BUSY_POLL */
2650
2651static inline bool be_lock_napi(struct be_eq_obj *eqo)
2652{
2653 return true;
2654}
2655
2656static inline void be_unlock_napi(struct be_eq_obj *eqo)
2657{
2658}
2659
2660static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2661{
2662 return false;
2663}
2664
2665static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2666{
2667}
2668
2669static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2670{
2671}
2672
2673static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2674{
2675}
2676#endif /* CONFIG_NET_RX_BUSY_POLL */
2677
2517int be_poll(struct napi_struct *napi, int budget) 2678int be_poll(struct napi_struct *napi, int budget)
2518{ 2679{
2519 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); 2680 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
@@ -2833,11 +2994,7 @@ static int be_close(struct net_device *netdev)
2833 be_tx_compl_clean(adapter); 2994 be_tx_compl_clean(adapter);
2834 2995
2835 be_rx_qs_destroy(adapter); 2996 be_rx_qs_destroy(adapter);
2836 2997 be_clear_uc_list(adapter);
2837 for (i = 1; i < (adapter->uc_macs + 1); i++)
2838 be_cmd_pmac_del(adapter, adapter->if_handle,
2839 adapter->pmac_id[i], 0);
2840 adapter->uc_macs = 0;
2841 2998
2842 for_all_evt_queues(adapter, eqo, i) { 2999 for_all_evt_queues(adapter, eqo, i) {
2843 if (msix_enabled(adapter)) 3000 if (msix_enabled(adapter))
@@ -3008,6 +3165,19 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
3008 return status; 3165 return status;
3009} 3166}
3010 3167
3168static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3169{
3170 u32 addr;
3171
3172 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3173
3174 mac[5] = (u8)(addr & 0xFF);
3175 mac[4] = (u8)((addr >> 8) & 0xFF);
3176 mac[3] = (u8)((addr >> 16) & 0xFF);
3177 /* Use the OUI from the current MAC address */
3178 memcpy(mac, adapter->netdev->dev_addr, 3);
3179}
3180
3011/* 3181/*
3012 * Generate a seed MAC address from the PF MAC Address using jhash. 3182 * Generate a seed MAC address from the PF MAC Address using jhash.
3013 * MAC Address for VFs are assigned incrementally starting from the seed. 3183 * MAC Address for VFs are assigned incrementally starting from the seed.
@@ -3108,14 +3278,9 @@ static void be_cancel_worker(struct be_adapter *adapter)
3108 3278
3109static void be_mac_clear(struct be_adapter *adapter) 3279static void be_mac_clear(struct be_adapter *adapter)
3110{ 3280{
3111 int i;
3112
3113 if (adapter->pmac_id) { 3281 if (adapter->pmac_id) {
3114 for (i = 0; i < (adapter->uc_macs + 1); i++) 3282 be_cmd_pmac_del(adapter, adapter->if_handle,
3115 be_cmd_pmac_del(adapter, adapter->if_handle, 3283 adapter->pmac_id[0], 0);
3116 adapter->pmac_id[i], 0);
3117 adapter->uc_macs = 0;
3118
3119 kfree(adapter->pmac_id); 3284 kfree(adapter->pmac_id);
3120 adapter->pmac_id = NULL; 3285 adapter->pmac_id = NULL;
3121 } 3286 }
@@ -3171,13 +3336,32 @@ static int be_clear(struct be_adapter *adapter)
3171 return 0; 3336 return 0;
3172} 3337}
3173 3338
3339static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3340 u32 cap_flags, u32 vf)
3341{
3342 u32 en_flags;
3343 int status;
3344
3345 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3346 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3347 BE_IF_FLAGS_RSS;
3348
3349 en_flags &= cap_flags;
3350
3351 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3352 if_handle, vf);
3353
3354 return status;
3355}
3356
3174static int be_vfs_if_create(struct be_adapter *adapter) 3357static int be_vfs_if_create(struct be_adapter *adapter)
3175{ 3358{
3176 struct be_resources res = {0}; 3359 struct be_resources res = {0};
3177 struct be_vf_cfg *vf_cfg; 3360 struct be_vf_cfg *vf_cfg;
3178 u32 cap_flags, en_flags, vf; 3361 u32 cap_flags, vf;
3179 int status = 0; 3362 int status;
3180 3363
3364 /* If a FW profile exists, then cap_flags are updated */
3181 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 3365 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3182 BE_IF_FLAGS_MULTICAST; 3366 BE_IF_FLAGS_MULTICAST;
3183 3367
@@ -3189,18 +3373,13 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3189 cap_flags = res.if_cap_flags; 3373 cap_flags = res.if_cap_flags;
3190 } 3374 }
3191 3375
3192 /* If a FW profile exists, then cap_flags are updated */ 3376 status = be_if_create(adapter, &vf_cfg->if_handle,
3193 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | 3377 cap_flags, vf + 1);
3194 BE_IF_FLAGS_BROADCAST |
3195 BE_IF_FLAGS_MULTICAST);
3196 status =
3197 be_cmd_if_create(adapter, cap_flags, en_flags,
3198 &vf_cfg->if_handle, vf + 1);
3199 if (status) 3378 if (status)
3200 goto err; 3379 return status;
3201 } 3380 }
3202err: 3381
3203 return status; 3382 return 0;
3204} 3383}
3205 3384
3206static int be_vf_setup_init(struct be_adapter *adapter) 3385static int be_vf_setup_init(struct be_adapter *adapter)
@@ -3385,7 +3564,7 @@ static void be_setup_init(struct be_adapter *adapter)
3385 adapter->phy.link_speed = -1; 3564 adapter->phy.link_speed = -1;
3386 adapter->if_handle = -1; 3565 adapter->if_handle = -1;
3387 adapter->be3_native = false; 3566 adapter->be3_native = false;
3388 adapter->promiscuous = false; 3567 adapter->if_flags = 0;
3389 if (be_physfn(adapter)) 3568 if (be_physfn(adapter))
3390 adapter->cmd_privileges = MAX_PRIVILEGES; 3569 adapter->cmd_privileges = MAX_PRIVILEGES;
3391 else 3570 else
@@ -3512,7 +3691,9 @@ static int be_get_config(struct be_adapter *adapter)
3512 if (status) 3691 if (status)
3513 return status; 3692 return status;
3514 3693
3515 if (be_physfn(adapter)) { 3694 be_cmd_query_port_name(adapter);
3695
3696 if (be_physfn(adapter)) {
3516 status = be_cmd_get_active_profile(adapter, &profile_id); 3697 status = be_cmd_get_active_profile(adapter, &profile_id);
3517 if (!status) 3698 if (!status)
3518 dev_info(&adapter->pdev->dev, 3699 dev_info(&adapter->pdev->dev,
@@ -3638,10 +3819,20 @@ int be_update_queues(struct be_adapter *adapter)
3638 return status; 3819 return status;
3639} 3820}
3640 3821
3822static inline int fw_major_num(const char *fw_ver)
3823{
3824 int fw_major = 0, i;
3825
3826 i = sscanf(fw_ver, "%d.", &fw_major);
3827 if (i != 1)
3828 return 0;
3829
3830 return fw_major;
3831}
3832
3641static int be_setup(struct be_adapter *adapter) 3833static int be_setup(struct be_adapter *adapter)
3642{ 3834{
3643 struct device *dev = &adapter->pdev->dev; 3835 struct device *dev = &adapter->pdev->dev;
3644 u32 tx_fc, rx_fc, en_flags;
3645 int status; 3836 int status;
3646 3837
3647 be_setup_init(adapter); 3838 be_setup_init(adapter);
@@ -3657,13 +3848,8 @@ static int be_setup(struct be_adapter *adapter)
3657 if (status) 3848 if (status)
3658 goto err; 3849 goto err;
3659 3850
3660 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 3851 status = be_if_create(adapter, &adapter->if_handle,
3661 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; 3852 be_if_cap_flags(adapter), 0);
3662 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3663 en_flags |= BE_IF_FLAGS_RSS;
3664 en_flags = en_flags & be_if_cap_flags(adapter);
3665 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3666 &adapter->if_handle, 0);
3667 if (status) 3853 if (status)
3668 goto err; 3854 goto err;
3669 3855
@@ -3696,11 +3882,14 @@ static int be_setup(struct be_adapter *adapter)
3696 3882
3697 be_cmd_get_acpi_wol_cap(adapter); 3883 be_cmd_get_acpi_wol_cap(adapter);
3698 3884
3699 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc); 3885 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
3886 adapter->rx_fc);
3887 if (status)
3888 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
3889 &adapter->rx_fc);
3700 3890
3701 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) 3891 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
3702 be_cmd_set_flow_control(adapter, adapter->tx_fc, 3892 adapter->tx_fc, adapter->rx_fc);
3703 adapter->rx_fc);
3704 3893
3705 if (be_physfn(adapter)) 3894 if (be_physfn(adapter))
3706 be_cmd_set_logical_link_config(adapter, 3895 be_cmd_set_logical_link_config(adapter,
@@ -3739,7 +3928,7 @@ static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3739 3928
3740static bool phy_flashing_required(struct be_adapter *adapter) 3929static bool phy_flashing_required(struct be_adapter *adapter)
3741{ 3930{
3742 return (adapter->phy.phy_type == TN_8022 && 3931 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
3743 adapter->phy.interface_type == PHY_TYPE_BASET_10GB); 3932 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3744} 3933}
3745 3934
@@ -3790,7 +3979,8 @@ static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3790 int status; 3979 int status;
3791 u8 crc[4]; 3980 u8 crc[4];
3792 3981
3793 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4); 3982 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
3983 img_size - 4);
3794 if (status) 3984 if (status)
3795 return status; 3985 return status;
3796 3986
@@ -3806,13 +3996,13 @@ static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3806} 3996}
3807 3997
3808static int be_flash(struct be_adapter *adapter, const u8 *img, 3998static int be_flash(struct be_adapter *adapter, const u8 *img,
3809 struct be_dma_mem *flash_cmd, int optype, int img_size) 3999 struct be_dma_mem *flash_cmd, int optype, int img_size,
4000 u32 img_offset)
3810{ 4001{
4002 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
3811 struct be_cmd_write_flashrom *req = flash_cmd->va; 4003 struct be_cmd_write_flashrom *req = flash_cmd->va;
3812 u32 total_bytes, flash_op, num_bytes;
3813 int status; 4004 int status;
3814 4005
3815 total_bytes = img_size;
3816 while (total_bytes) { 4006 while (total_bytes) {
3817 num_bytes = min_t(u32, 32*1024, total_bytes); 4007 num_bytes = min_t(u32, 32*1024, total_bytes);
3818 4008
@@ -3833,12 +4023,15 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
3833 memcpy(req->data_buf, img, num_bytes); 4023 memcpy(req->data_buf, img, num_bytes);
3834 img += num_bytes; 4024 img += num_bytes;
3835 status = be_cmd_write_flashrom(adapter, flash_cmd, optype, 4025 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3836 flash_op, num_bytes); 4026 flash_op, img_offset +
4027 bytes_sent, num_bytes);
3837 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST && 4028 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
3838 optype == OPTYPE_PHY_FW) 4029 optype == OPTYPE_PHY_FW)
3839 break; 4030 break;
3840 else if (status) 4031 else if (status)
3841 return status; 4032 return status;
4033
4034 bytes_sent += num_bytes;
3842 } 4035 }
3843 return 0; 4036 return 0;
3844} 4037}
@@ -3906,6 +4099,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
3906 pflashcomp = gen2_flash_types; 4099 pflashcomp = gen2_flash_types;
3907 filehdr_size = sizeof(struct flash_file_hdr_g2); 4100 filehdr_size = sizeof(struct flash_file_hdr_g2);
3908 num_comp = ARRAY_SIZE(gen2_flash_types); 4101 num_comp = ARRAY_SIZE(gen2_flash_types);
4102 img_hdrs_size = 0;
3909 } 4103 }
3910 4104
3911 /* Get flash section info*/ 4105 /* Get flash section info*/
@@ -3950,7 +4144,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
3950 return -1; 4144 return -1;
3951 4145
3952 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype, 4146 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3953 pflashcomp[i].size); 4147 pflashcomp[i].size, 0);
3954 if (status) { 4148 if (status) {
3955 dev_err(dev, "Flashing section type 0x%x failed\n", 4149 dev_err(dev, "Flashing section type 0x%x failed\n",
3956 pflashcomp[i].img_type); 4150 pflashcomp[i].img_type);
@@ -4017,12 +4211,12 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
4017 struct be_dma_mem *flash_cmd, int num_of_images) 4211 struct be_dma_mem *flash_cmd, int num_of_images)
4018{ 4212{
4019 int img_hdrs_size = num_of_images * sizeof(struct image_hdr); 4213 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
4214 bool crc_match, old_fw_img, flash_offset_support = true;
4020 struct device *dev = &adapter->pdev->dev; 4215 struct device *dev = &adapter->pdev->dev;
4021 struct flash_section_info *fsec = NULL; 4216 struct flash_section_info *fsec = NULL;
4022 u32 img_offset, img_size, img_type; 4217 u32 img_offset, img_size, img_type;
4218 u16 img_optype, flash_optype;
4023 int status, i, filehdr_size; 4219 int status, i, filehdr_size;
4024 bool crc_match, old_fw_img;
4025 u16 img_optype;
4026 const u8 *p; 4220 const u8 *p;
4027 4221
4028 filehdr_size = sizeof(struct flash_file_hdr_g3); 4222 filehdr_size = sizeof(struct flash_file_hdr_g3);
@@ -4032,6 +4226,7 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
4032 return -EINVAL; 4226 return -EINVAL;
4033 } 4227 }
4034 4228
4229retry_flash:
4035 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) { 4230 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4036 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset); 4231 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4037 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size); 4232 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
@@ -4041,6 +4236,12 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
4041 4236
4042 if (img_optype == 0xFFFF) 4237 if (img_optype == 0xFFFF)
4043 continue; 4238 continue;
4239
4240 if (flash_offset_support)
4241 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4242 else
4243 flash_optype = img_optype;
4244
4044 /* Don't bother verifying CRC if an old FW image is being 4245 /* Don't bother verifying CRC if an old FW image is being
4045 * flashed 4246 * flashed
4046 */ 4247 */
@@ -4049,16 +4250,26 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
4049 4250
4050 status = be_check_flash_crc(adapter, fw->data, img_offset, 4251 status = be_check_flash_crc(adapter, fw->data, img_offset,
4051 img_size, filehdr_size + 4252 img_size, filehdr_size +
4052 img_hdrs_size, img_optype, 4253 img_hdrs_size, flash_optype,
4053 &crc_match); 4254 &crc_match);
4054 /* The current FW image on the card does not recognize the new
4055 * FLASH op_type. The FW download is partially complete.
4056 * Reboot the server now to enable FW image to recognize the
4057 * new FLASH op_type. To complete the remaining process,
4058 * download the same FW again after the reboot.
4059 */
4060 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST || 4255 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4061 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) { 4256 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
4257 /* The current FW image on the card does not support
4258 * OFFSET based flashing. Retry using older mechanism
4259 * of OPTYPE based flashing
4260 */
4261 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4262 flash_offset_support = false;
4263 goto retry_flash;
4264 }
4265
4266 /* The current FW image on the card does not recognize
4267 * the new FLASH op_type. The FW download is partially
4268 * complete. Reboot the server now to enable FW image
4269 * to recognize the new FLASH op_type. To complete the
4270 * remaining process, download the same FW again after
4271 * the reboot.
4272 */
4062 dev_err(dev, "Flash incomplete. Reset the server\n"); 4273 dev_err(dev, "Flash incomplete. Reset the server\n");
4063 dev_err(dev, "Download FW image again after reset\n"); 4274 dev_err(dev, "Download FW image again after reset\n");
4064 return -EAGAIN; 4275 return -EAGAIN;
@@ -4076,7 +4287,19 @@ flash:
4076 if (p + img_size > fw->data + fw->size) 4287 if (p + img_size > fw->data + fw->size)
4077 return -1; 4288 return -1;
4078 4289
4079 status = be_flash(adapter, p, flash_cmd, img_optype, img_size); 4290 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4291 img_offset);
4292
4293 /* The current FW image on the card does not support OFFSET
4294 * based flashing. Retry using older mechanism of OPTYPE based
4295 * flashing
4296 */
4297 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4298 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4299 flash_offset_support = false;
4300 goto retry_flash;
4301 }
4302
4080 /* For old FW images ignore ILLEGAL_FIELD error or errors on 4303 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4081 * UFI_DIR region 4304 * UFI_DIR region
4082 */ 4305 */
@@ -4179,98 +4402,105 @@ static int lancer_fw_download(struct be_adapter *adapter,
4179 return 0; 4402 return 0;
4180} 4403}
4181 4404
4182#define UFI_TYPE2 2 4405#define BE2_UFI 2
4183#define UFI_TYPE3 3 4406#define BE3_UFI 3
4184#define UFI_TYPE3R 10 4407#define BE3R_UFI 10
4185#define UFI_TYPE4 4 4408#define SH_UFI 4
4409#define SH_P2_UFI 11
4410
4186static int be_get_ufi_type(struct be_adapter *adapter, 4411static int be_get_ufi_type(struct be_adapter *adapter,
4187 struct flash_file_hdr_g3 *fhdr) 4412 struct flash_file_hdr_g3 *fhdr)
4188{ 4413{
4189 if (!fhdr) 4414 if (!fhdr) {
4190 goto be_get_ufi_exit; 4415 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4416 return -1;
4417 }
4191 4418
4192 if (skyhawk_chip(adapter) && fhdr->build[0] == '4') 4419 /* First letter of the build version is used to identify
4193 return UFI_TYPE4; 4420 * which chip this image file is meant for.
4194 else if (BE3_chip(adapter) && fhdr->build[0] == '3') { 4421 */
4195 if (fhdr->asic_type_rev == 0x10) 4422 switch (fhdr->build[0]) {
4196 return UFI_TYPE3R; 4423 case BLD_STR_UFI_TYPE_SH:
4197 else 4424 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4198 return UFI_TYPE3; 4425 SH_UFI;
4199 } else if (BE2_chip(adapter) && fhdr->build[0] == '2') 4426 case BLD_STR_UFI_TYPE_BE3:
4200 return UFI_TYPE2; 4427 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4428 BE3_UFI;
4429 case BLD_STR_UFI_TYPE_BE2:
4430 return BE2_UFI;
4431 default:
4432 return -1;
4433 }
4434}
4201 4435
4202be_get_ufi_exit: 4436/* Check if the flash image file is compatible with the adapter that
4203 dev_err(&adapter->pdev->dev, 4437 * is being flashed.
4204 "UFI and Interface are not compatible for flashing\n"); 4438 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
4205 return -1; 4439 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
4440 */
4441static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4442 struct flash_file_hdr_g3 *fhdr)
4443{
4444 int ufi_type = be_get_ufi_type(adapter, fhdr);
4445
4446 switch (ufi_type) {
4447 case SH_P2_UFI:
4448 return skyhawk_chip(adapter);
4449 case SH_UFI:
4450 return (skyhawk_chip(adapter) &&
4451 adapter->asic_rev < ASIC_REV_P2);
4452 case BE3R_UFI:
4453 return BE3_chip(adapter);
4454 case BE3_UFI:
4455 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4456 case BE2_UFI:
4457 return BE2_chip(adapter);
4458 default:
4459 return false;
4460 }
4206} 4461}
4207 4462
4208static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) 4463static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4209{ 4464{
4465 struct device *dev = &adapter->pdev->dev;
4210 struct flash_file_hdr_g3 *fhdr3; 4466 struct flash_file_hdr_g3 *fhdr3;
4211 struct image_hdr *img_hdr_ptr = NULL; 4467 struct image_hdr *img_hdr_ptr;
4468 int status = 0, i, num_imgs;
4212 struct be_dma_mem flash_cmd; 4469 struct be_dma_mem flash_cmd;
4213 const u8 *p;
4214 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
4215 4470
4216 flash_cmd.size = sizeof(struct be_cmd_write_flashrom); 4471 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4217 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, 4472 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4218 &flash_cmd.dma, GFP_KERNEL); 4473 dev_err(dev, "Flash image is not compatible with adapter\n");
4219 if (!flash_cmd.va) { 4474 return -EINVAL;
4220 status = -ENOMEM;
4221 goto be_fw_exit;
4222 } 4475 }
4223 4476
4224 p = fw->data; 4477 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4225 fhdr3 = (struct flash_file_hdr_g3 *)p; 4478 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4226 4479 GFP_KERNEL);
4227 ufi_type = be_get_ufi_type(adapter, fhdr3); 4480 if (!flash_cmd.va)
4481 return -ENOMEM;
4228 4482
4229 num_imgs = le32_to_cpu(fhdr3->num_imgs); 4483 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4230 for (i = 0; i < num_imgs; i++) { 4484 for (i = 0; i < num_imgs; i++) {
4231 img_hdr_ptr = (struct image_hdr *)(fw->data + 4485 img_hdr_ptr = (struct image_hdr *)(fw->data +
4232 (sizeof(struct flash_file_hdr_g3) + 4486 (sizeof(struct flash_file_hdr_g3) +
4233 i * sizeof(struct image_hdr))); 4487 i * sizeof(struct image_hdr)));
4234 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) { 4488 if (!BE2_chip(adapter) &&
4235 switch (ufi_type) { 4489 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4236 case UFI_TYPE4: 4490 continue;
4237 status = be_flash_skyhawk(adapter, fw,
4238 &flash_cmd, num_imgs);
4239 break;
4240 case UFI_TYPE3R:
4241 status = be_flash_BEx(adapter, fw, &flash_cmd,
4242 num_imgs);
4243 break;
4244 case UFI_TYPE3:
4245 /* Do not flash this ufi on BE3-R cards */
4246 if (adapter->asic_rev < 0x10)
4247 status = be_flash_BEx(adapter, fw,
4248 &flash_cmd,
4249 num_imgs);
4250 else {
4251 status = -EINVAL;
4252 dev_err(&adapter->pdev->dev,
4253 "Can't load BE3 UFI on BE3R\n");
4254 }
4255 }
4256 }
4257 }
4258
4259 if (ufi_type == UFI_TYPE2)
4260 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
4261 else if (ufi_type == -1)
4262 status = -EINVAL;
4263 4491
4264 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, 4492 if (skyhawk_chip(adapter))
4265 flash_cmd.dma); 4493 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4266 if (status) { 4494 num_imgs);
4267 dev_err(&adapter->pdev->dev, "Firmware load error\n"); 4495 else
4268 goto be_fw_exit; 4496 status = be_flash_BEx(adapter, fw, &flash_cmd,
4497 num_imgs);
4269 } 4498 }
4270 4499
4271 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); 4500 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4501 if (!status)
4502 dev_info(dev, "Firmware flashed successfully\n");
4272 4503
4273be_fw_exit:
4274 return status; 4504 return status;
4275} 4505}
4276 4506
@@ -4304,7 +4534,8 @@ fw_exit:
4304 return status; 4534 return status;
4305} 4535}
4306 4536
4307static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) 4537static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4538 u16 flags)
4308{ 4539{
4309 struct be_adapter *adapter = netdev_priv(dev); 4540 struct be_adapter *adapter = netdev_priv(dev);
4310 struct nlattr *attr, *br_spec; 4541 struct nlattr *attr, *br_spec;
@@ -4832,6 +5063,20 @@ static void be_func_recovery_task(struct work_struct *work)
4832 msecs_to_jiffies(1000)); 5063 msecs_to_jiffies(1000));
4833} 5064}
4834 5065
5066static void be_log_sfp_info(struct be_adapter *adapter)
5067{
5068 int status;
5069
5070 status = be_cmd_query_sfp_info(adapter);
5071 if (!status) {
5072 dev_err(&adapter->pdev->dev,
5073 "Unqualified SFP+ detected on %c from %s part no: %s",
5074 adapter->port_name, adapter->phy.vendor_name,
5075 adapter->phy.vendor_pn);
5076 }
5077 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5078}
5079
4835static void be_worker(struct work_struct *work) 5080static void be_worker(struct work_struct *work)
4836{ 5081{
4837 struct be_adapter *adapter = 5082 struct be_adapter *adapter =
@@ -4870,6 +5115,9 @@ static void be_worker(struct work_struct *work)
4870 5115
4871 be_eqd_update(adapter); 5116 be_eqd_update(adapter);
4872 5117
5118 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5119 be_log_sfp_info(adapter);
5120
4873reschedule: 5121reschedule:
4874 adapter->work_counter++; 5122 adapter->work_counter++;
4875 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 5123 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
@@ -4916,12 +5164,31 @@ static inline char *func_name(struct be_adapter *adapter)
4916 return be_physfn(adapter) ? "PF" : "VF"; 5164 return be_physfn(adapter) ? "PF" : "VF";
4917} 5165}
4918 5166
5167static inline char *nic_name(struct pci_dev *pdev)
5168{
5169 switch (pdev->device) {
5170 case OC_DEVICE_ID1:
5171 return OC_NAME;
5172 case OC_DEVICE_ID2:
5173 return OC_NAME_BE;
5174 case OC_DEVICE_ID3:
5175 case OC_DEVICE_ID4:
5176 return OC_NAME_LANCER;
5177 case BE_DEVICE_ID2:
5178 return BE3_NAME;
5179 case OC_DEVICE_ID5:
5180 case OC_DEVICE_ID6:
5181 return OC_NAME_SH;
5182 default:
5183 return BE_NAME;
5184 }
5185}
5186
4919static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) 5187static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4920{ 5188{
4921 int status = 0;
4922 struct be_adapter *adapter; 5189 struct be_adapter *adapter;
4923 struct net_device *netdev; 5190 struct net_device *netdev;
4924 char port_name; 5191 int status = 0;
4925 5192
4926 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER); 5193 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
4927 5194
@@ -5015,10 +5282,8 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
5015 schedule_delayed_work(&adapter->func_recovery_work, 5282 schedule_delayed_work(&adapter->func_recovery_work,
5016 msecs_to_jiffies(1000)); 5283 msecs_to_jiffies(1000));
5017 5284
5018 be_cmd_query_port_name(adapter, &port_name);
5019
5020 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev), 5285 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
5021 func_name(adapter), mc_name(adapter), port_name); 5286 func_name(adapter), mc_name(adapter), adapter->port_name);
5022 5287
5023 return 0; 5288 return 0;
5024 5289
@@ -5083,6 +5348,10 @@ static int be_resume(struct pci_dev *pdev)
5083 if (status) 5348 if (status)
5084 return status; 5349 return status;
5085 5350
5351 status = be_cmd_reset_function(adapter);
5352 if (status)
5353 return status;
5354
5086 be_intr_set(adapter, true); 5355 be_intr_set(adapter, true);
5087 /* tell fw we're ready to fire cmds */ 5356 /* tell fw we're ready to fire cmds */
5088 status = be_cmd_fw_init(adapter); 5357 status = be_cmd_fw_init(adapter);
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 270308315d43..ba84c4a9ce32 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -69,7 +69,8 @@ config FSL_XGMAC_MDIO
69 select PHYLIB 69 select PHYLIB
70 select OF_MDIO 70 select OF_MDIO
71 ---help--- 71 ---help---
72 This driver supports the MDIO bus on the Fman 10G Ethernet MACs. 72 This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
73 on the FMan mEMAC (which supports both Clauses 22 and 45)
73 74
74config UCC_GETH 75config UCC_GETH
75 tristate "Freescale QE Gigabit Ethernet" 76 tristate "Freescale QE Gigabit Ethernet"
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 40132929daf7..a86af8a7485d 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -16,6 +16,7 @@
16#include <linux/clocksource.h> 16#include <linux/clocksource.h>
17#include <linux/net_tstamp.h> 17#include <linux/net_tstamp.h>
18#include <linux/ptp_clock_kernel.h> 18#include <linux/ptp_clock_kernel.h>
19#include <linux/timecounter.h>
19 20
20#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 21#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
21 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ 22 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
@@ -356,6 +357,7 @@ struct bufdesc_ex {
356#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ 357#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
357#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ 358#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
358#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ 359#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
360#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */
359#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2) 361#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
360#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2) 362#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
361#define FEC_ENET_TS_AVAIL ((uint)0x00010000) 363#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
@@ -513,6 +515,7 @@ struct fec_enet_private {
513 int irq[FEC_IRQ_NUM]; 515 int irq[FEC_IRQ_NUM];
514 bool bufdesc_ex; 516 bool bufdesc_ex;
515 int pause_flag; 517 int pause_flag;
518 int wol_flag;
516 u32 quirks; 519 u32 quirks;
517 520
518 struct napi_struct napi; 521 struct napi_struct napi;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index bba87775419d..9bb6220663b2 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -188,6 +188,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
188#define FEC_MMFR_RA(v) ((v & 0x1f) << 18) 188#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
189#define FEC_MMFR_TA (2 << 16) 189#define FEC_MMFR_TA (2 << 16)
190#define FEC_MMFR_DATA(v) (v & 0xffff) 190#define FEC_MMFR_DATA(v) (v & 0xffff)
191/* FEC ECR bits definition */
192#define FEC_ECR_MAGICEN (1 << 2)
193#define FEC_ECR_SLEEP (1 << 3)
191 194
192#define FEC_MII_TIMEOUT 30000 /* us */ 195#define FEC_MII_TIMEOUT 30000 /* us */
193 196
@@ -196,6 +199,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
196 199
197#define FEC_PAUSE_FLAG_AUTONEG 0x1 200#define FEC_PAUSE_FLAG_AUTONEG 0x1
198#define FEC_PAUSE_FLAG_ENABLE 0x2 201#define FEC_PAUSE_FLAG_ENABLE 0x2
202#define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0)
203#define FEC_WOL_FLAG_ENABLE (0x1 << 1)
204#define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2)
199 205
200#define COPYBREAK_DEFAULT 256 206#define COPYBREAK_DEFAULT 256
201 207
@@ -1090,7 +1096,9 @@ static void
1090fec_stop(struct net_device *ndev) 1096fec_stop(struct net_device *ndev)
1091{ 1097{
1092 struct fec_enet_private *fep = netdev_priv(ndev); 1098 struct fec_enet_private *fep = netdev_priv(ndev);
1099 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
1093 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); 1100 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
1101 u32 val;
1094 1102
1095 /* We cannot expect a graceful transmit stop without link !!! */ 1103 /* We cannot expect a graceful transmit stop without link !!! */
1096 if (fep->link) { 1104 if (fep->link) {
@@ -1104,17 +1112,28 @@ fec_stop(struct net_device *ndev)
1104 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC 1112 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
1105 * instead of reset MAC itself. 1113 * instead of reset MAC itself.
1106 */ 1114 */
1107 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 1115 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1108 writel(0, fep->hwp + FEC_ECNTRL); 1116 if (fep->quirks & FEC_QUIRK_HAS_AVB) {
1117 writel(0, fep->hwp + FEC_ECNTRL);
1118 } else {
1119 writel(1, fep->hwp + FEC_ECNTRL);
1120 udelay(10);
1121 }
1122 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1109 } else { 1123 } else {
1110 writel(1, fep->hwp + FEC_ECNTRL); 1124 writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
1111 udelay(10); 1125 val = readl(fep->hwp + FEC_ECNTRL);
1126 val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
1127 writel(val, fep->hwp + FEC_ECNTRL);
1128
1129 if (pdata && pdata->sleep_mode_enable)
1130 pdata->sleep_mode_enable(true);
1112 } 1131 }
1113 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1132 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1114 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1115 1133
1116 /* We have to keep ENET enabled to have MII interrupt stay working */ 1134 /* We have to keep ENET enabled to have MII interrupt stay working */
1117 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 1135 if (fep->quirks & FEC_QUIRK_ENET_MAC &&
1136 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1118 writel(2, fep->hwp + FEC_ECNTRL); 1137 writel(2, fep->hwp + FEC_ECNTRL);
1119 writel(rmii_mode, fep->hwp + FEC_R_CNTRL); 1138 writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
1120 } 1139 }
@@ -1170,12 +1189,13 @@ static void
1170fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) 1189fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1171{ 1190{
1172 struct fec_enet_private *fep; 1191 struct fec_enet_private *fep;
1173 struct bufdesc *bdp; 1192 struct bufdesc *bdp, *bdp_t;
1174 unsigned short status; 1193 unsigned short status;
1175 struct sk_buff *skb; 1194 struct sk_buff *skb;
1176 struct fec_enet_priv_tx_q *txq; 1195 struct fec_enet_priv_tx_q *txq;
1177 struct netdev_queue *nq; 1196 struct netdev_queue *nq;
1178 int index = 0; 1197 int index = 0;
1198 int i, bdnum;
1179 int entries_free; 1199 int entries_free;
1180 1200
1181 fep = netdev_priv(ndev); 1201 fep = netdev_priv(ndev);
@@ -1196,18 +1216,29 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1196 if (bdp == txq->cur_tx) 1216 if (bdp == txq->cur_tx)
1197 break; 1217 break;
1198 1218
1199 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); 1219 bdp_t = bdp;
1200 1220 bdnum = 1;
1221 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1201 skb = txq->tx_skbuff[index]; 1222 skb = txq->tx_skbuff[index];
1202 txq->tx_skbuff[index] = NULL; 1223 while (!skb) {
1203 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) 1224 bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
1204 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, 1225 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1205 bdp->cbd_datlen, DMA_TO_DEVICE); 1226 skb = txq->tx_skbuff[index];
1206 bdp->cbd_bufaddr = 0; 1227 bdnum++;
1207 if (!skb) {
1208 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1209 continue;
1210 } 1228 }
1229 if (skb_shinfo(skb)->nr_frags &&
1230 (status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
1231 break;
1232
1233 for (i = 0; i < bdnum; i++) {
1234 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
1235 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1236 bdp->cbd_datlen, DMA_TO_DEVICE);
1237 bdp->cbd_bufaddr = 0;
1238 if (i < bdnum - 1)
1239 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1240 }
1241 txq->tx_skbuff[index] = NULL;
1211 1242
1212 /* Check for errors. */ 1243 /* Check for errors. */
1213 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1244 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -2428,6 +2459,44 @@ static int fec_enet_set_tunable(struct net_device *netdev,
2428 return ret; 2459 return ret;
2429} 2460}
2430 2461
2462static void
2463fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2464{
2465 struct fec_enet_private *fep = netdev_priv(ndev);
2466
2467 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
2468 wol->supported = WAKE_MAGIC;
2469 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
2470 } else {
2471 wol->supported = wol->wolopts = 0;
2472 }
2473}
2474
2475static int
2476fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2477{
2478 struct fec_enet_private *fep = netdev_priv(ndev);
2479
2480 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
2481 return -EINVAL;
2482
2483 if (wol->wolopts & ~WAKE_MAGIC)
2484 return -EINVAL;
2485
2486 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
2487 if (device_may_wakeup(&ndev->dev)) {
2488 fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
2489 if (fep->irq[0] > 0)
2490 enable_irq_wake(fep->irq[0]);
2491 } else {
2492 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
2493 if (fep->irq[0] > 0)
2494 disable_irq_wake(fep->irq[0]);
2495 }
2496
2497 return 0;
2498}
2499
2431static const struct ethtool_ops fec_enet_ethtool_ops = { 2500static const struct ethtool_ops fec_enet_ethtool_ops = {
2432 .get_settings = fec_enet_get_settings, 2501 .get_settings = fec_enet_get_settings,
2433 .set_settings = fec_enet_set_settings, 2502 .set_settings = fec_enet_set_settings,
@@ -2446,6 +2515,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
2446 .get_ts_info = fec_enet_get_ts_info, 2515 .get_ts_info = fec_enet_get_ts_info,
2447 .get_tunable = fec_enet_get_tunable, 2516 .get_tunable = fec_enet_get_tunable,
2448 .set_tunable = fec_enet_set_tunable, 2517 .set_tunable = fec_enet_set_tunable,
2518 .get_wol = fec_enet_get_wol,
2519 .set_wol = fec_enet_set_wol,
2449}; 2520};
2450 2521
2451static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 2522static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -2525,12 +2596,9 @@ static void fec_enet_free_queue(struct net_device *ndev)
2525 } 2596 }
2526 2597
2527 for (i = 0; i < fep->num_rx_queues; i++) 2598 for (i = 0; i < fep->num_rx_queues; i++)
2528 if (fep->rx_queue[i]) 2599 kfree(fep->rx_queue[i]);
2529 kfree(fep->rx_queue[i]);
2530
2531 for (i = 0; i < fep->num_tx_queues; i++) 2600 for (i = 0; i < fep->num_tx_queues; i++)
2532 if (fep->tx_queue[i]) 2601 kfree(fep->tx_queue[i]);
2533 kfree(fep->tx_queue[i]);
2534} 2602}
2535 2603
2536static int fec_enet_alloc_queue(struct net_device *ndev) 2604static int fec_enet_alloc_queue(struct net_device *ndev)
@@ -2706,6 +2774,9 @@ fec_enet_open(struct net_device *ndev)
2706 phy_start(fep->phy_dev); 2774 phy_start(fep->phy_dev);
2707 netif_tx_start_all_queues(ndev); 2775 netif_tx_start_all_queues(ndev);
2708 2776
2777 device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
2778 FEC_WOL_FLAG_ENABLE);
2779
2709 return 0; 2780 return 0;
2710 2781
2711err_enet_mii_probe: 2782err_enet_mii_probe:
@@ -3155,6 +3226,9 @@ fec_probe(struct platform_device *pdev)
3155 3226
3156 platform_set_drvdata(pdev, ndev); 3227 platform_set_drvdata(pdev, ndev);
3157 3228
3229 if (of_get_property(np, "fsl,magic-packet", NULL))
3230 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
3231
3158 phy_node = of_parse_phandle(np, "phy-handle", 0); 3232 phy_node = of_parse_phandle(np, "phy-handle", 0);
3159 if (!phy_node && of_phy_is_fixed_link(np)) { 3233 if (!phy_node && of_phy_is_fixed_link(np)) {
3160 ret = of_phy_register_fixed_link(np); 3234 ret = of_phy_register_fixed_link(np);
@@ -3249,6 +3323,8 @@ fec_probe(struct platform_device *pdev)
3249 0, pdev->name, ndev); 3323 0, pdev->name, ndev);
3250 if (ret) 3324 if (ret)
3251 goto failed_irq; 3325 goto failed_irq;
3326
3327 fep->irq[i] = irq;
3252 } 3328 }
3253 3329
3254 init_completion(&fep->mdio_done); 3330 init_completion(&fep->mdio_done);
@@ -3265,6 +3341,9 @@ fec_probe(struct platform_device *pdev)
3265 if (ret) 3341 if (ret)
3266 goto failed_register; 3342 goto failed_register;
3267 3343
3344 device_init_wakeup(&ndev->dev, fep->wol_flag &
3345 FEC_WOL_HAS_MAGIC_PACKET);
3346
3268 if (fep->bufdesc_ex && fep->ptp_clock) 3347 if (fep->bufdesc_ex && fep->ptp_clock)
3269 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); 3348 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
3270 3349
@@ -3318,6 +3397,8 @@ static int __maybe_unused fec_suspend(struct device *dev)
3318 3397
3319 rtnl_lock(); 3398 rtnl_lock();
3320 if (netif_running(ndev)) { 3399 if (netif_running(ndev)) {
3400 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
3401 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
3321 phy_stop(fep->phy_dev); 3402 phy_stop(fep->phy_dev);
3322 napi_disable(&fep->napi); 3403 napi_disable(&fep->napi);
3323 netif_tx_lock_bh(ndev); 3404 netif_tx_lock_bh(ndev);
@@ -3325,11 +3406,12 @@ static int __maybe_unused fec_suspend(struct device *dev)
3325 netif_tx_unlock_bh(ndev); 3406 netif_tx_unlock_bh(ndev);
3326 fec_stop(ndev); 3407 fec_stop(ndev);
3327 fec_enet_clk_enable(ndev, false); 3408 fec_enet_clk_enable(ndev, false);
3328 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3409 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
3410 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3329 } 3411 }
3330 rtnl_unlock(); 3412 rtnl_unlock();
3331 3413
3332 if (fep->reg_phy) 3414 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
3333 regulator_disable(fep->reg_phy); 3415 regulator_disable(fep->reg_phy);
3334 3416
3335 /* SOC supply clock to phy, when clock is disabled, phy link down 3417 /* SOC supply clock to phy, when clock is disabled, phy link down
@@ -3345,9 +3427,11 @@ static int __maybe_unused fec_resume(struct device *dev)
3345{ 3427{
3346 struct net_device *ndev = dev_get_drvdata(dev); 3428 struct net_device *ndev = dev_get_drvdata(dev);
3347 struct fec_enet_private *fep = netdev_priv(ndev); 3429 struct fec_enet_private *fep = netdev_priv(ndev);
3430 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
3348 int ret; 3431 int ret;
3432 int val;
3349 3433
3350 if (fep->reg_phy) { 3434 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
3351 ret = regulator_enable(fep->reg_phy); 3435 ret = regulator_enable(fep->reg_phy);
3352 if (ret) 3436 if (ret)
3353 return ret; 3437 return ret;
@@ -3355,12 +3439,21 @@ static int __maybe_unused fec_resume(struct device *dev)
3355 3439
3356 rtnl_lock(); 3440 rtnl_lock();
3357 if (netif_running(ndev)) { 3441 if (netif_running(ndev)) {
3358 pinctrl_pm_select_default_state(&fep->pdev->dev);
3359 ret = fec_enet_clk_enable(ndev, true); 3442 ret = fec_enet_clk_enable(ndev, true);
3360 if (ret) { 3443 if (ret) {
3361 rtnl_unlock(); 3444 rtnl_unlock();
3362 goto failed_clk; 3445 goto failed_clk;
3363 } 3446 }
3447 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
3448 if (pdata && pdata->sleep_mode_enable)
3449 pdata->sleep_mode_enable(false);
3450 val = readl(fep->hwp + FEC_ECNTRL);
3451 val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
3452 writel(val, fep->hwp + FEC_ECNTRL);
3453 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
3454 } else {
3455 pinctrl_pm_select_default_state(&fep->pdev->dev);
3456 }
3364 fec_restart(ndev); 3457 fec_restart(ndev);
3365 netif_tx_lock_bh(ndev); 3458 netif_tx_lock_bh(ndev);
3366 netif_device_attach(ndev); 3459 netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 992c8c3db553..1f9cf2345266 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -374,23 +374,9 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
374 struct fec_enet_private *fep = 374 struct fec_enet_private *fep =
375 container_of(ptp, struct fec_enet_private, ptp_caps); 375 container_of(ptp, struct fec_enet_private, ptp_caps);
376 unsigned long flags; 376 unsigned long flags;
377 u64 now;
378 u32 counter;
379 377
380 spin_lock_irqsave(&fep->tmreg_lock, flags); 378 spin_lock_irqsave(&fep->tmreg_lock, flags);
381 379 timecounter_adjtime(&fep->tc, delta);
382 now = timecounter_read(&fep->tc);
383 now += delta;
384
385 /* Get the timer value based on adjusted timestamp.
386 * Update the counter with the masked value.
387 */
388 counter = now & fep->cc.mask;
389 writel(counter, fep->hwp + FEC_ATIME);
390
391 /* reset the timecounter */
392 timecounter_init(&fep->tc, &fep->cc, now);
393
394 spin_unlock_irqrestore(&fep->tmreg_lock, flags); 380 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
395 381
396 return 0; 382 return 0;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 9e2bcb807923..a17628769a1f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -278,14 +278,20 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
278 fep->stats.collisions++; 278 fep->stats.collisions++;
279 279
280 /* unmap */ 280 /* unmap */
281 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 281 if (fep->mapped_as_page[dirtyidx])
282 skb->len, DMA_TO_DEVICE); 282 dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
283 CBDR_DATLEN(bdp), DMA_TO_DEVICE);
284 else
285 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
286 CBDR_DATLEN(bdp), DMA_TO_DEVICE);
283 287
284 /* 288 /*
285 * Free the sk buffer associated with this last transmit. 289 * Free the sk buffer associated with this last transmit.
286 */ 290 */
287 dev_kfree_skb(skb); 291 if (skb) {
288 fep->tx_skbuff[dirtyidx] = NULL; 292 dev_kfree_skb(skb);
293 fep->tx_skbuff[dirtyidx] = NULL;
294 }
289 295
290 /* 296 /*
291 * Update pointer to next buffer descriptor to be transmitted. 297 * Update pointer to next buffer descriptor to be transmitted.
@@ -299,7 +305,7 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
299 * Since we have freed up a buffer, the ring is no longer 305 * Since we have freed up a buffer, the ring is no longer
300 * full. 306 * full.
301 */ 307 */
302 if (!fep->tx_free++) 308 if (++fep->tx_free >= MAX_SKB_FRAGS)
303 do_wake = 1; 309 do_wake = 1;
304 has_tx_work = 1; 310 has_tx_work = 1;
305 } 311 }
@@ -509,6 +515,9 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
509 cbd_t __iomem *bdp; 515 cbd_t __iomem *bdp;
510 int curidx; 516 int curidx;
511 u16 sc; 517 u16 sc;
518 int nr_frags = skb_shinfo(skb)->nr_frags;
519 skb_frag_t *frag;
520 int len;
512 521
513#ifdef CONFIG_FS_ENET_MPC5121_FEC 522#ifdef CONFIG_FS_ENET_MPC5121_FEC
514 if (((unsigned long)skb->data) & 0x3) { 523 if (((unsigned long)skb->data) & 0x3) {
@@ -530,7 +539,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
530 */ 539 */
531 bdp = fep->cur_tx; 540 bdp = fep->cur_tx;
532 541
533 if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { 542 if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
534 netif_stop_queue(dev); 543 netif_stop_queue(dev);
535 spin_unlock(&fep->tx_lock); 544 spin_unlock(&fep->tx_lock);
536 545
@@ -543,35 +552,42 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
543 } 552 }
544 553
545 curidx = bdp - fep->tx_bd_base; 554 curidx = bdp - fep->tx_bd_base;
546 /*
547 * Clear all of the status flags.
548 */
549 CBDC_SC(bdp, BD_ENET_TX_STATS);
550
551 /*
552 * Save skb pointer.
553 */
554 fep->tx_skbuff[curidx] = skb;
555
556 fep->stats.tx_bytes += skb->len;
557 555
556 len = skb->len;
557 fep->stats.tx_bytes += len;
558 if (nr_frags)
559 len -= skb->data_len;
560 fep->tx_free -= nr_frags + 1;
558 /* 561 /*
559 * Push the data cache so the CPM does not get stale memory data. 562 * Push the data cache so the CPM does not get stale memory data.
560 */ 563 */
561 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, 564 CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
562 skb->data, skb->len, DMA_TO_DEVICE)); 565 skb->data, len, DMA_TO_DEVICE));
563 CBDW_DATLEN(bdp, skb->len); 566 CBDW_DATLEN(bdp, len);
567
568 fep->mapped_as_page[curidx] = 0;
569 frag = skb_shinfo(skb)->frags;
570 while (nr_frags) {
571 CBDC_SC(bdp,
572 BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
573 CBDS_SC(bdp, BD_ENET_TX_READY);
574
575 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
576 bdp++, curidx++;
577 else
578 bdp = fep->tx_bd_base, curidx = 0;
564 579
565 /* 580 len = skb_frag_size(frag);
566 * If this was the last BD in the ring, start at the beginning again. 581 CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
567 */ 582 DMA_TO_DEVICE));
568 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) 583 CBDW_DATLEN(bdp, len);
569 fep->cur_tx++;
570 else
571 fep->cur_tx = fep->tx_bd_base;
572 584
573 if (!--fep->tx_free) 585 fep->tx_skbuff[curidx] = NULL;
574 netif_stop_queue(dev); 586 fep->mapped_as_page[curidx] = 1;
587
588 frag++;
589 nr_frags--;
590 }
575 591
576 /* Trigger transmission start */ 592 /* Trigger transmission start */
577 sc = BD_ENET_TX_READY | BD_ENET_TX_INTR | 593 sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
@@ -582,8 +598,22 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
582 * yay for hw reuse :) */ 598 * yay for hw reuse :) */
583 if (skb->len <= 60) 599 if (skb->len <= 60)
584 sc |= BD_ENET_TX_PAD; 600 sc |= BD_ENET_TX_PAD;
601 CBDC_SC(bdp, BD_ENET_TX_STATS);
585 CBDS_SC(bdp, sc); 602 CBDS_SC(bdp, sc);
586 603
604 /* Save skb pointer. */
605 fep->tx_skbuff[curidx] = skb;
606
607 /* If this was the last BD in the ring, start at the beginning again. */
608 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
609 bdp++;
610 else
611 bdp = fep->tx_bd_base;
612 fep->cur_tx = bdp;
613
614 if (fep->tx_free < MAX_SKB_FRAGS)
615 netif_stop_queue(dev);
616
587 skb_tx_timestamp(skb); 617 skb_tx_timestamp(skb);
588 618
589 (*fep->ops->tx_kickstart)(dev); 619 (*fep->ops->tx_kickstart)(dev);
@@ -917,7 +947,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
917 } 947 }
918 948
919 fpi->rx_ring = 32; 949 fpi->rx_ring = 32;
920 fpi->tx_ring = 32; 950 fpi->tx_ring = 64;
921 fpi->rx_copybreak = 240; 951 fpi->rx_copybreak = 240;
922 fpi->napi_weight = 17; 952 fpi->napi_weight = 17;
923 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); 953 fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
@@ -955,7 +985,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
955 985
956 privsize = sizeof(*fep) + 986 privsize = sizeof(*fep) +
957 sizeof(struct sk_buff **) * 987 sizeof(struct sk_buff **) *
958 (fpi->rx_ring + fpi->tx_ring); 988 (fpi->rx_ring + fpi->tx_ring) +
989 sizeof(char) * fpi->tx_ring;
959 990
960 ndev = alloc_etherdev(privsize); 991 ndev = alloc_etherdev(privsize);
961 if (!ndev) { 992 if (!ndev) {
@@ -978,6 +1009,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
978 1009
979 fep->rx_skbuff = (struct sk_buff **)&fep[1]; 1010 fep->rx_skbuff = (struct sk_buff **)&fep[1];
980 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; 1011 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
1012 fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring +
1013 fpi->tx_ring);
981 1014
982 spin_lock_init(&fep->lock); 1015 spin_lock_init(&fep->lock);
983 spin_lock_init(&fep->tx_lock); 1016 spin_lock_init(&fep->tx_lock);
@@ -1007,6 +1040,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
1007 1040
1008 netif_carrier_off(ndev); 1041 netif_carrier_off(ndev);
1009 1042
1043 ndev->features |= NETIF_F_SG;
1044
1010 ret = register_netdev(ndev); 1045 ret = register_netdev(ndev);
1011 if (ret) 1046 if (ret)
1012 goto out_free_bd; 1047 goto out_free_bd;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 3a4b49e0e717..f184d8f952e2 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -134,6 +134,7 @@ struct fs_enet_private {
134 void __iomem *ring_base; 134 void __iomem *ring_base;
135 struct sk_buff **rx_skbuff; 135 struct sk_buff **rx_skbuff;
136 struct sk_buff **tx_skbuff; 136 struct sk_buff **tx_skbuff;
137 char *mapped_as_page;
137 cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */ 138 cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */
138 cbd_t __iomem *tx_bd_base; 139 cbd_t __iomem *tx_bd_base;
139 cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */ 140 cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 5645342f5b28..43df78882e48 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -116,7 +116,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
116static void gfar_reset_task(struct work_struct *work); 116static void gfar_reset_task(struct work_struct *work);
117static void gfar_timeout(struct net_device *dev); 117static void gfar_timeout(struct net_device *dev);
118static int gfar_close(struct net_device *dev); 118static int gfar_close(struct net_device *dev);
119struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr); 119static struct sk_buff *gfar_new_skb(struct net_device *dev,
120 dma_addr_t *bufaddr);
120static int gfar_set_mac_address(struct net_device *dev); 121static int gfar_set_mac_address(struct net_device *dev);
121static int gfar_change_mtu(struct net_device *dev, int new_mtu); 122static int gfar_change_mtu(struct net_device *dev, int new_mtu);
122static irqreturn_t gfar_error(int irq, void *dev_id); 123static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -176,7 +177,7 @@ static int gfar_init_bds(struct net_device *ndev)
176 struct gfar_priv_rx_q *rx_queue = NULL; 177 struct gfar_priv_rx_q *rx_queue = NULL;
177 struct txbd8 *txbdp; 178 struct txbd8 *txbdp;
178 struct rxbd8 *rxbdp; 179 struct rxbd8 *rxbdp;
179 u32 *rfbptr; 180 u32 __iomem *rfbptr;
180 int i, j; 181 int i, j;
181 dma_addr_t bufaddr; 182 dma_addr_t bufaddr;
182 183
@@ -554,7 +555,7 @@ static void gfar_ints_enable(struct gfar_private *priv)
554 } 555 }
555} 556}
556 557
557void lock_tx_qs(struct gfar_private *priv) 558static void lock_tx_qs(struct gfar_private *priv)
558{ 559{
559 int i; 560 int i;
560 561
@@ -562,7 +563,7 @@ void lock_tx_qs(struct gfar_private *priv)
562 spin_lock(&priv->tx_queue[i]->txlock); 563 spin_lock(&priv->tx_queue[i]->txlock);
563} 564}
564 565
565void unlock_tx_qs(struct gfar_private *priv) 566static void unlock_tx_qs(struct gfar_private *priv)
566{ 567{
567 int i; 568 int i;
568 569
@@ -763,7 +764,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
763 u32 *tx_queues, *rx_queues; 764 u32 *tx_queues, *rx_queues;
764 unsigned short mode, poll_mode; 765 unsigned short mode, poll_mode;
765 766
766 if (!np || !of_device_is_available(np)) 767 if (!np)
767 return -ENODEV; 768 return -ENODEV;
768 769
769 if (of_device_is_compatible(np, "fsl,etsec2")) { 770 if (of_device_is_compatible(np, "fsl,etsec2")) {
@@ -2169,7 +2170,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
2169void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) 2170void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2170{ 2171{
2171 fcb->flags |= TXFCB_VLN; 2172 fcb->flags |= TXFCB_VLN;
2172 fcb->vlctl = vlan_tx_tag_get(skb); 2173 fcb->vlctl = skb_vlan_tag_get(skb);
2173} 2174}
2174 2175
2175static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, 2176static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
@@ -2229,7 +2230,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2229 regs = tx_queue->grp->regs; 2230 regs = tx_queue->grp->regs;
2230 2231
2231 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); 2232 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
2232 do_vlan = vlan_tx_tag_present(skb); 2233 do_vlan = skb_vlan_tag_present(skb);
2233 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 2234 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2234 priv->hwts_tx_en; 2235 priv->hwts_tx_en;
2235 2236
@@ -2671,7 +2672,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2671 return skb; 2672 return skb;
2672} 2673}
2673 2674
2674struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr) 2675static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
2675{ 2676{
2676 struct gfar_private *priv = netdev_priv(dev); 2677 struct gfar_private *priv = netdev_priv(dev);
2677 struct sk_buff *skb; 2678 struct sk_buff *skb;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index b581b8823a2a..9e1802400c23 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1039,7 +1039,7 @@ struct gfar_priv_rx_q {
1039 /* RX Coalescing values */ 1039 /* RX Coalescing values */
1040 unsigned char rxcoalescing; 1040 unsigned char rxcoalescing;
1041 unsigned long rxic; 1041 unsigned long rxic;
1042 u32 *rfbptr; 1042 u32 __iomem *rfbptr;
1043}; 1043};
1044 1044
1045enum gfar_irqinfo_id { 1045enum gfar_irqinfo_id {
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 6e7db66069aa..3a83bc2c613c 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -32,18 +32,19 @@ struct tgec_mdio_controller {
32 __be32 mdio_addr; /* MDIO address */ 32 __be32 mdio_addr; /* MDIO address */
33} __packed; 33} __packed;
34 34
35#define MDIO_STAT_ENC BIT(6)
35#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8) 36#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
36#define MDIO_STAT_BSY (1 << 0) 37#define MDIO_STAT_BSY BIT(0)
37#define MDIO_STAT_RD_ER (1 << 1) 38#define MDIO_STAT_RD_ER BIT(1)
38#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f) 39#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
39#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5) 40#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
40#define MDIO_CTL_PRE_DIS (1 << 10) 41#define MDIO_CTL_PRE_DIS BIT(10)
41#define MDIO_CTL_SCAN_EN (1 << 11) 42#define MDIO_CTL_SCAN_EN BIT(11)
42#define MDIO_CTL_POST_INC (1 << 14) 43#define MDIO_CTL_POST_INC BIT(14)
43#define MDIO_CTL_READ (1 << 15) 44#define MDIO_CTL_READ BIT(15)
44 45
45#define MDIO_DATA(x) (x & 0xffff) 46#define MDIO_DATA(x) (x & 0xffff)
46#define MDIO_DATA_BSY (1 << 31) 47#define MDIO_DATA_BSY BIT(31)
47 48
48/* 49/*
49 * Wait until the MDIO bus is free 50 * Wait until the MDIO bus is free
@@ -51,12 +52,16 @@ struct tgec_mdio_controller {
51static int xgmac_wait_until_free(struct device *dev, 52static int xgmac_wait_until_free(struct device *dev,
52 struct tgec_mdio_controller __iomem *regs) 53 struct tgec_mdio_controller __iomem *regs)
53{ 54{
54 uint32_t status; 55 unsigned int timeout;
55 56
56 /* Wait till the bus is free */ 57 /* Wait till the bus is free */
57 status = spin_event_timeout( 58 timeout = TIMEOUT;
58 !((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0); 59 while ((ioread32be(&regs->mdio_stat) & MDIO_STAT_BSY) && timeout) {
59 if (!status) { 60 cpu_relax();
61 timeout--;
62 }
63
64 if (!timeout) {
60 dev_err(dev, "timeout waiting for bus to be free\n"); 65 dev_err(dev, "timeout waiting for bus to be free\n");
61 return -ETIMEDOUT; 66 return -ETIMEDOUT;
62 } 67 }
@@ -70,12 +75,16 @@ static int xgmac_wait_until_free(struct device *dev,
70static int xgmac_wait_until_done(struct device *dev, 75static int xgmac_wait_until_done(struct device *dev,
71 struct tgec_mdio_controller __iomem *regs) 76 struct tgec_mdio_controller __iomem *regs)
72{ 77{
73 uint32_t status; 78 unsigned int timeout;
74 79
75 /* Wait till the MDIO write is complete */ 80 /* Wait till the MDIO write is complete */
76 status = spin_event_timeout( 81 timeout = TIMEOUT;
77 !((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0); 82 while ((ioread32be(&regs->mdio_data) & MDIO_DATA_BSY) && timeout) {
78 if (!status) { 83 cpu_relax();
84 timeout--;
85 }
86
87 if (!timeout) {
79 dev_err(dev, "timeout waiting for operation to complete\n"); 88 dev_err(dev, "timeout waiting for operation to complete\n");
80 return -ETIMEDOUT; 89 return -ETIMEDOUT;
81 } 90 }
@@ -91,29 +100,42 @@ static int xgmac_wait_until_done(struct device *dev,
91static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) 100static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
92{ 101{
93 struct tgec_mdio_controller __iomem *regs = bus->priv; 102 struct tgec_mdio_controller __iomem *regs = bus->priv;
94 uint16_t dev_addr = regnum >> 16; 103 uint16_t dev_addr;
104 u32 mdio_ctl, mdio_stat;
95 int ret; 105 int ret;
96 106
97 /* Setup the MII Mgmt clock speed */ 107 mdio_stat = ioread32be(&regs->mdio_stat);
98 out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100)); 108 if (regnum & MII_ADDR_C45) {
109 /* Clause 45 (ie 10G) */
110 dev_addr = (regnum >> 16) & 0x1f;
111 mdio_stat |= MDIO_STAT_ENC;
112 } else {
113 /* Clause 22 (ie 1G) */
114 dev_addr = regnum & 0x1f;
115 mdio_stat &= ~MDIO_STAT_ENC;
116 }
117
118 iowrite32be(mdio_stat, &regs->mdio_stat);
99 119
100 ret = xgmac_wait_until_free(&bus->dev, regs); 120 ret = xgmac_wait_until_free(&bus->dev, regs);
101 if (ret) 121 if (ret)
102 return ret; 122 return ret;
103 123
104 /* Set the port and dev addr */ 124 /* Set the port and dev addr */
105 out_be32(&regs->mdio_ctl, 125 mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
106 MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr)); 126 iowrite32be(mdio_ctl, &regs->mdio_ctl);
107 127
108 /* Set the register address */ 128 /* Set the register address */
109 out_be32(&regs->mdio_addr, regnum & 0xffff); 129 if (regnum & MII_ADDR_C45) {
130 iowrite32be(regnum & 0xffff, &regs->mdio_addr);
110 131
111 ret = xgmac_wait_until_free(&bus->dev, regs); 132 ret = xgmac_wait_until_free(&bus->dev, regs);
112 if (ret) 133 if (ret)
113 return ret; 134 return ret;
135 }
114 136
115 /* Write the value to the register */ 137 /* Write the value to the register */
116 out_be32(&regs->mdio_data, MDIO_DATA(value)); 138 iowrite32be(MDIO_DATA(value), &regs->mdio_data);
117 139
118 ret = xgmac_wait_until_done(&bus->dev, regs); 140 ret = xgmac_wait_until_done(&bus->dev, regs);
119 if (ret) 141 if (ret)
@@ -130,13 +152,22 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
130static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) 152static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
131{ 153{
132 struct tgec_mdio_controller __iomem *regs = bus->priv; 154 struct tgec_mdio_controller __iomem *regs = bus->priv;
133 uint16_t dev_addr = regnum >> 16; 155 uint16_t dev_addr;
156 uint32_t mdio_stat;
134 uint32_t mdio_ctl; 157 uint32_t mdio_ctl;
135 uint16_t value; 158 uint16_t value;
136 int ret; 159 int ret;
137 160
138 /* Setup the MII Mgmt clock speed */ 161 mdio_stat = ioread32be(&regs->mdio_stat);
139 out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100)); 162 if (regnum & MII_ADDR_C45) {
163 dev_addr = (regnum >> 16) & 0x1f;
164 mdio_stat |= MDIO_STAT_ENC;
165 } else {
166 dev_addr = regnum & 0x1f;
167 mdio_stat &= ~MDIO_STAT_ENC;
168 }
169
170 iowrite32be(mdio_stat, &regs->mdio_stat);
140 171
141 ret = xgmac_wait_until_free(&bus->dev, regs); 172 ret = xgmac_wait_until_free(&bus->dev, regs);
142 if (ret) 173 if (ret)
@@ -144,54 +175,38 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
144 175
145 /* Set the Port and Device Addrs */ 176 /* Set the Port and Device Addrs */
146 mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); 177 mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
147 out_be32(&regs->mdio_ctl, mdio_ctl); 178 iowrite32be(mdio_ctl, &regs->mdio_ctl);
148 179
149 /* Set the register address */ 180 /* Set the register address */
150 out_be32(&regs->mdio_addr, regnum & 0xffff); 181 if (regnum & MII_ADDR_C45) {
182 iowrite32be(regnum & 0xffff, &regs->mdio_addr);
151 183
152 ret = xgmac_wait_until_free(&bus->dev, regs); 184 ret = xgmac_wait_until_free(&bus->dev, regs);
153 if (ret) 185 if (ret)
154 return ret; 186 return ret;
187 }
155 188
156 /* Initiate the read */ 189 /* Initiate the read */
157 out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ); 190 iowrite32be(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl);
158 191
159 ret = xgmac_wait_until_done(&bus->dev, regs); 192 ret = xgmac_wait_until_done(&bus->dev, regs);
160 if (ret) 193 if (ret)
161 return ret; 194 return ret;
162 195
163 /* Return all Fs if nothing was there */ 196 /* Return all Fs if nothing was there */
164 if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) { 197 if (ioread32be(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
165 dev_err(&bus->dev, 198 dev_err(&bus->dev,
166 "Error while reading PHY%d reg at %d.%hhu\n", 199 "Error while reading PHY%d reg at %d.%hhu\n",
167 phy_id, dev_addr, regnum); 200 phy_id, dev_addr, regnum);
168 return 0xffff; 201 return 0xffff;
169 } 202 }
170 203
171 value = in_be32(&regs->mdio_data) & 0xffff; 204 value = ioread32be(&regs->mdio_data) & 0xffff;
172 dev_dbg(&bus->dev, "read %04x\n", value); 205 dev_dbg(&bus->dev, "read %04x\n", value);
173 206
174 return value; 207 return value;
175} 208}
176 209
177/* Reset the MIIM registers, and wait for the bus to free */
178static int xgmac_mdio_reset(struct mii_bus *bus)
179{
180 struct tgec_mdio_controller __iomem *regs = bus->priv;
181 int ret;
182
183 mutex_lock(&bus->mdio_lock);
184
185 /* Setup the MII Mgmt clock speed */
186 out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
187
188 ret = xgmac_wait_until_free(&bus->dev, regs);
189
190 mutex_unlock(&bus->mdio_lock);
191
192 return ret;
193}
194
195static int xgmac_mdio_probe(struct platform_device *pdev) 210static int xgmac_mdio_probe(struct platform_device *pdev)
196{ 211{
197 struct device_node *np = pdev->dev.of_node; 212 struct device_node *np = pdev->dev.of_node;
@@ -205,15 +220,13 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
205 return ret; 220 return ret;
206 } 221 }
207 222
208 bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int)); 223 bus = mdiobus_alloc();
209 if (!bus) 224 if (!bus)
210 return -ENOMEM; 225 return -ENOMEM;
211 226
212 bus->name = "Freescale XGMAC MDIO Bus"; 227 bus->name = "Freescale XGMAC MDIO Bus";
213 bus->read = xgmac_mdio_read; 228 bus->read = xgmac_mdio_read;
214 bus->write = xgmac_mdio_write; 229 bus->write = xgmac_mdio_write;
215 bus->reset = xgmac_mdio_reset;
216 bus->irq = bus->priv;
217 bus->parent = &pdev->dev; 230 bus->parent = &pdev->dev;
218 snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start); 231 snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
219 232
@@ -258,6 +271,9 @@ static struct of_device_id xgmac_mdio_match[] = {
258 { 271 {
259 .compatible = "fsl,fman-xmdio", 272 .compatible = "fsl,fman-xmdio",
260 }, 273 },
274 {
275 .compatible = "fsl,fman-memac-mdio",
276 },
261 {}, 277 {},
262}; 278};
263MODULE_DEVICE_TABLE(of, xgmac_mdio_match); 279MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index e9421731b05e..a54d89791311 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -24,4 +24,13 @@ config HIX5HD2_GMAC
24 help 24 help
25 This selects the hix5hd2 mac family network device. 25 This selects the hix5hd2 mac family network device.
26 26
27config HIP04_ETH
28 tristate "HISILICON P04 Ethernet support"
29 select PHYLIB
30 select MARVELL_PHY
31 select MFD_SYSCON
32 ---help---
33 If you wish to compile a kernel for a hardware with hisilicon p04 SoC and
34 want to use the internal ethernet then you should answer Y to this.
35
27endif # NET_VENDOR_HISILICON 36endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
index 9175e84622d4..6c14540a4dc5 100644
--- a/drivers/net/ethernet/hisilicon/Makefile
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -3,3 +3,4 @@
3# 3#
4 4
5obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o 5obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
6obj-$(CONFIG_HIP04_ETH) += hip04_mdio.o hip04_eth.o
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
new file mode 100644
index 000000000000..b72d238695d7
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -0,0 +1,971 @@
1
2/* Copyright (c) 2014 Linaro Ltd.
3 * Copyright (c) 2014 Hisilicon Limited.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#include <linux/module.h>
12#include <linux/etherdevice.h>
13#include <linux/platform_device.h>
14#include <linux/interrupt.h>
15#include <linux/ktime.h>
16#include <linux/of_address.h>
17#include <linux/phy.h>
18#include <linux/of_mdio.h>
19#include <linux/of_net.h>
20#include <linux/mfd/syscon.h>
21#include <linux/regmap.h>
22
23#define PPE_CFG_RX_ADDR 0x100
24#define PPE_CFG_POOL_GRP 0x300
25#define PPE_CFG_RX_BUF_SIZE 0x400
26#define PPE_CFG_RX_FIFO_SIZE 0x500
27#define PPE_CURR_BUF_CNT 0xa200
28
29#define GE_DUPLEX_TYPE 0x08
30#define GE_MAX_FRM_SIZE_REG 0x3c
31#define GE_PORT_MODE 0x40
32#define GE_PORT_EN 0x44
33#define GE_SHORT_RUNTS_THR_REG 0x50
34#define GE_TX_LOCAL_PAGE_REG 0x5c
35#define GE_TRANSMIT_CONTROL_REG 0x60
36#define GE_CF_CRC_STRIP_REG 0x1b0
37#define GE_MODE_CHANGE_REG 0x1b4
38#define GE_RECV_CONTROL_REG 0x1e0
39#define GE_STATION_MAC_ADDRESS 0x210
40#define PPE_CFG_CPU_ADD_ADDR 0x580
41#define PPE_CFG_MAX_FRAME_LEN_REG 0x408
42#define PPE_CFG_BUS_CTRL_REG 0x424
43#define PPE_CFG_RX_CTRL_REG 0x428
44#define PPE_CFG_RX_PKT_MODE_REG 0x438
45#define PPE_CFG_QOS_VMID_GEN 0x500
46#define PPE_CFG_RX_PKT_INT 0x538
47#define PPE_INTEN 0x600
48#define PPE_INTSTS 0x608
49#define PPE_RINT 0x604
50#define PPE_CFG_STS_MODE 0x700
51#define PPE_HIS_RX_PKT_CNT 0x804
52
53/* REG_INTERRUPT */
54#define RCV_INT BIT(10)
55#define RCV_NOBUF BIT(8)
56#define RCV_DROP BIT(7)
57#define TX_DROP BIT(6)
58#define DEF_INT_ERR (RCV_NOBUF | RCV_DROP | TX_DROP)
59#define DEF_INT_MASK (RCV_INT | DEF_INT_ERR)
60
61/* TX descriptor config */
62#define TX_FREE_MEM BIT(0)
63#define TX_READ_ALLOC_L3 BIT(1)
64#define TX_FINISH_CACHE_INV BIT(2)
65#define TX_CLEAR_WB BIT(4)
66#define TX_L3_CHECKSUM BIT(5)
67#define TX_LOOP_BACK BIT(11)
68
69/* RX error */
70#define RX_PKT_DROP BIT(0)
71#define RX_L2_ERR BIT(1)
72#define RX_PKT_ERR (RX_PKT_DROP | RX_L2_ERR)
73
74#define SGMII_SPEED_1000 0x08
75#define SGMII_SPEED_100 0x07
76#define SGMII_SPEED_10 0x06
77#define MII_SPEED_100 0x01
78#define MII_SPEED_10 0x00
79
80#define GE_DUPLEX_FULL BIT(0)
81#define GE_DUPLEX_HALF 0x00
82#define GE_MODE_CHANGE_EN BIT(0)
83
84#define GE_TX_AUTO_NEG BIT(5)
85#define GE_TX_ADD_CRC BIT(6)
86#define GE_TX_SHORT_PAD_THROUGH BIT(7)
87
88#define GE_RX_STRIP_CRC BIT(0)
89#define GE_RX_STRIP_PAD BIT(3)
90#define GE_RX_PAD_EN BIT(4)
91
92#define GE_AUTO_NEG_CTL BIT(0)
93
94#define GE_RX_INT_THRESHOLD BIT(6)
95#define GE_RX_TIMEOUT 0x04
96
97#define GE_RX_PORT_EN BIT(1)
98#define GE_TX_PORT_EN BIT(2)
99
100#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(12)
101
102#define PPE_CFG_RX_PKT_ALIGN BIT(18)
103#define PPE_CFG_QOS_VMID_MODE BIT(14)
104#define PPE_CFG_QOS_VMID_GRP_SHIFT 8
105
106#define PPE_CFG_RX_FIFO_FSFU BIT(11)
107#define PPE_CFG_RX_DEPTH_SHIFT 16
108#define PPE_CFG_RX_START_SHIFT 0
109#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
110
111#define PPE_CFG_BUS_LOCAL_REL BIT(14)
112#define PPE_CFG_BUS_BIG_ENDIEN BIT(0)
113
114#define RX_DESC_NUM 128
115#define TX_DESC_NUM 256
116#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM-1))
117#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM-1))
118
119#define GMAC_PPE_RX_PKT_MAX_LEN 379
120#define GMAC_MAX_PKT_LEN 1516
121#define GMAC_MIN_PKT_LEN 31
122#define RX_BUF_SIZE 1600
123#define RESET_TIMEOUT 1000
124#define TX_TIMEOUT (6 * HZ)
125
126#define DRV_NAME "hip04-ether"
127#define DRV_VERSION "v1.0"
128
129#define HIP04_MAX_TX_COALESCE_USECS 200
130#define HIP04_MIN_TX_COALESCE_USECS 100
131#define HIP04_MAX_TX_COALESCE_FRAMES 200
132#define HIP04_MIN_TX_COALESCE_FRAMES 100
133
134struct tx_desc {
135 u32 send_addr;
136 u32 send_size;
137 u32 next_addr;
138 u32 cfg;
139 u32 wb_addr;
140} __aligned(64);
141
142struct rx_desc {
143 u16 reserved_16;
144 u16 pkt_len;
145 u32 reserve1[3];
146 u32 pkt_err;
147 u32 reserve2[4];
148};
149
150struct hip04_priv {
151 void __iomem *base;
152 int phy_mode;
153 int chan;
154 unsigned int port;
155 unsigned int speed;
156 unsigned int duplex;
157 unsigned int reg_inten;
158
159 struct napi_struct napi;
160 struct net_device *ndev;
161
162 struct tx_desc *tx_desc;
163 dma_addr_t tx_desc_dma;
164 struct sk_buff *tx_skb[TX_DESC_NUM];
165 dma_addr_t tx_phys[TX_DESC_NUM];
166 unsigned int tx_head;
167
168 int tx_coalesce_frames;
169 int tx_coalesce_usecs;
170 struct hrtimer tx_coalesce_timer;
171
172 unsigned char *rx_buf[RX_DESC_NUM];
173 dma_addr_t rx_phys[RX_DESC_NUM];
174 unsigned int rx_head;
175 unsigned int rx_buf_size;
176
177 struct device_node *phy_node;
178 struct phy_device *phy;
179 struct regmap *map;
180 struct work_struct tx_timeout_task;
181
182 /* written only by tx cleanup */
183 unsigned int tx_tail ____cacheline_aligned_in_smp;
184};
185
186static inline unsigned int tx_count(unsigned int head, unsigned int tail)
187{
188 return (head - tail) % (TX_DESC_NUM - 1);
189}
190
191static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
192{
193 struct hip04_priv *priv = netdev_priv(ndev);
194 u32 val;
195
196 priv->speed = speed;
197 priv->duplex = duplex;
198
199 switch (priv->phy_mode) {
200 case PHY_INTERFACE_MODE_SGMII:
201 if (speed == SPEED_1000)
202 val = SGMII_SPEED_1000;
203 else if (speed == SPEED_100)
204 val = SGMII_SPEED_100;
205 else
206 val = SGMII_SPEED_10;
207 break;
208 case PHY_INTERFACE_MODE_MII:
209 if (speed == SPEED_100)
210 val = MII_SPEED_100;
211 else
212 val = MII_SPEED_10;
213 break;
214 default:
215 netdev_warn(ndev, "not supported mode\n");
216 val = MII_SPEED_10;
217 break;
218 }
219 writel_relaxed(val, priv->base + GE_PORT_MODE);
220
221 val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
222 writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);
223
224 val = GE_MODE_CHANGE_EN;
225 writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
226}
227
228static void hip04_reset_ppe(struct hip04_priv *priv)
229{
230 u32 val, tmp, timeout = 0;
231
232 do {
233 regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
234 regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
235 if (timeout++ > RESET_TIMEOUT)
236 break;
237 } while (val & 0xfff);
238}
239
240static void hip04_config_fifo(struct hip04_priv *priv)
241{
242 u32 val;
243
244 val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
245 val |= PPE_CFG_STS_RX_PKT_CNT_RC;
246 writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);
247
248 val = BIT(priv->port);
249 regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);
250
251 val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT;
252 val |= PPE_CFG_QOS_VMID_MODE;
253 writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);
254
255 val = RX_BUF_SIZE;
256 regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);
257
258 val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
259 val |= PPE_CFG_RX_FIFO_FSFU;
260 val |= priv->chan << PPE_CFG_RX_START_SHIFT;
261 regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);
262
263 val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
264 writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);
265
266 val = PPE_CFG_RX_PKT_ALIGN;
267 writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);
268
269 val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
270 writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);
271
272 val = GMAC_PPE_RX_PKT_MAX_LEN;
273 writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);
274
275 val = GMAC_MAX_PKT_LEN;
276 writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);
277
278 val = GMAC_MIN_PKT_LEN;
279 writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);
280
281 val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
282 val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
283 writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);
284
285 val = GE_RX_STRIP_CRC;
286 writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);
287
288 val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
289 val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
290 writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);
291
292 val = GE_AUTO_NEG_CTL;
293 writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
294}
295
296static void hip04_mac_enable(struct net_device *ndev)
297{
298 struct hip04_priv *priv = netdev_priv(ndev);
299 u32 val;
300
301 /* enable tx & rx */
302 val = readl_relaxed(priv->base + GE_PORT_EN);
303 val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
304 writel_relaxed(val, priv->base + GE_PORT_EN);
305
306 /* clear rx int */
307 val = RCV_INT;
308 writel_relaxed(val, priv->base + PPE_RINT);
309
310 /* config recv int */
311 val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
312 writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);
313
314 /* enable interrupt */
315 priv->reg_inten = DEF_INT_MASK;
316 writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
317}
318
319static void hip04_mac_disable(struct net_device *ndev)
320{
321 struct hip04_priv *priv = netdev_priv(ndev);
322 u32 val;
323
324 /* disable int */
325 priv->reg_inten &= ~(DEF_INT_MASK);
326 writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
327
328 /* disable tx & rx */
329 val = readl_relaxed(priv->base + GE_PORT_EN);
330 val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
331 writel_relaxed(val, priv->base + GE_PORT_EN);
332}
333
334static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
335{
336 writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR);
337}
338
339static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
340{
341 regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys);
342}
343
344static u32 hip04_recv_cnt(struct hip04_priv *priv)
345{
346 return readl(priv->base + PPE_HIS_RX_PKT_CNT);
347}
348
349static void hip04_update_mac_address(struct net_device *ndev)
350{
351 struct hip04_priv *priv = netdev_priv(ndev);
352
353 writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
354 priv->base + GE_STATION_MAC_ADDRESS);
355 writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
356 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
357 priv->base + GE_STATION_MAC_ADDRESS + 4);
358}
359
/* .ndo_set_mac_address: validate and store the new address via
 * eth_mac_addr(), then push it to the hardware registers.
 *
 * Fix: the return value of eth_mac_addr() was ignored, so an address
 * the core rejected (invalid, or refused while running) was still
 * written to the MAC and success was reported to the caller.
 */
static int hip04_set_mac_address(struct net_device *ndev, void *addr)
{
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret < 0)
		return ret;

	hip04_update_mac_address(ndev);
	return 0;
}
366
367static int hip04_tx_reclaim(struct net_device *ndev, bool force)
368{
369 struct hip04_priv *priv = netdev_priv(ndev);
370 unsigned tx_tail = priv->tx_tail;
371 struct tx_desc *desc;
372 unsigned int bytes_compl = 0, pkts_compl = 0;
373 unsigned int count;
374
375 smp_rmb();
376 count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
377 if (count == 0)
378 goto out;
379
380 while (count) {
381 desc = &priv->tx_desc[tx_tail];
382 if (desc->send_addr != 0) {
383 if (force)
384 desc->send_addr = 0;
385 else
386 break;
387 }
388
389 if (priv->tx_phys[tx_tail]) {
390 dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
391 priv->tx_skb[tx_tail]->len,
392 DMA_TO_DEVICE);
393 priv->tx_phys[tx_tail] = 0;
394 }
395 pkts_compl++;
396 bytes_compl += priv->tx_skb[tx_tail]->len;
397 dev_kfree_skb(priv->tx_skb[tx_tail]);
398 priv->tx_skb[tx_tail] = NULL;
399 tx_tail = TX_NEXT(tx_tail);
400 count--;
401 }
402
403 priv->tx_tail = tx_tail;
404 smp_wmb(); /* Ensure tx_tail visible to xmit */
405
406out:
407 if (pkts_compl || bytes_compl)
408 netdev_completed_queue(ndev, pkts_compl, bytes_compl);
409
410 if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
411 netif_wake_queue(ndev);
412
413 return count;
414}
415
416static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
417{
418 struct hip04_priv *priv = netdev_priv(ndev);
419 struct net_device_stats *stats = &ndev->stats;
420 unsigned int tx_head = priv->tx_head, count;
421 struct tx_desc *desc = &priv->tx_desc[tx_head];
422 dma_addr_t phys;
423
424 smp_rmb();
425 count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
426 if (count == (TX_DESC_NUM - 1)) {
427 netif_stop_queue(ndev);
428 return NETDEV_TX_BUSY;
429 }
430
431 phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
432 if (dma_mapping_error(&ndev->dev, phys)) {
433 dev_kfree_skb(skb);
434 return NETDEV_TX_OK;
435 }
436
437 priv->tx_skb[tx_head] = skb;
438 priv->tx_phys[tx_head] = phys;
439 desc->send_addr = cpu_to_be32(phys);
440 desc->send_size = cpu_to_be32(skb->len);
441 desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
442 phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
443 desc->wb_addr = cpu_to_be32(phys);
444 skb_tx_timestamp(skb);
445
446 hip04_set_xmit_desc(priv, phys);
447 priv->tx_head = TX_NEXT(tx_head);
448 count++;
449 netdev_sent_queue(ndev, skb->len);
450
451 stats->tx_bytes += skb->len;
452 stats->tx_packets++;
453
454 /* Ensure tx_head update visible to tx reclaim */
455 smp_wmb();
456
457 /* queue is getting full, better start cleaning up now */
458 if (count >= priv->tx_coalesce_frames) {
459 if (napi_schedule_prep(&priv->napi)) {
460 /* disable rx interrupt and timer */
461 priv->reg_inten &= ~(RCV_INT);
462 writel_relaxed(DEF_INT_MASK & ~RCV_INT,
463 priv->base + PPE_INTEN);
464 hrtimer_cancel(&priv->tx_coalesce_timer);
465 __napi_schedule(&priv->napi);
466 }
467 } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
468 /* cleanup not pending yet, start a new timer */
469 hrtimer_start_expires(&priv->tx_coalesce_timer,
470 HRTIMER_MODE_REL);
471 }
472
473 return NETDEV_TX_OK;
474}
475
476static int hip04_rx_poll(struct napi_struct *napi, int budget)
477{
478 struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
479 struct net_device *ndev = priv->ndev;
480 struct net_device_stats *stats = &ndev->stats;
481 unsigned int cnt = hip04_recv_cnt(priv);
482 struct rx_desc *desc;
483 struct sk_buff *skb;
484 unsigned char *buf;
485 bool last = false;
486 dma_addr_t phys;
487 int rx = 0;
488 int tx_remaining;
489 u16 len;
490 u32 err;
491
492 while (cnt && !last) {
493 buf = priv->rx_buf[priv->rx_head];
494 skb = build_skb(buf, priv->rx_buf_size);
495 if (unlikely(!skb))
496 net_dbg_ratelimited("build_skb failed\n");
497
498 dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
499 RX_BUF_SIZE, DMA_FROM_DEVICE);
500 priv->rx_phys[priv->rx_head] = 0;
501
502 desc = (struct rx_desc *)skb->data;
503 len = be16_to_cpu(desc->pkt_len);
504 err = be32_to_cpu(desc->pkt_err);
505
506 if (0 == len) {
507 dev_kfree_skb_any(skb);
508 last = true;
509 } else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
510 dev_kfree_skb_any(skb);
511 stats->rx_dropped++;
512 stats->rx_errors++;
513 } else {
514 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
515 skb_put(skb, len);
516 skb->protocol = eth_type_trans(skb, ndev);
517 napi_gro_receive(&priv->napi, skb);
518 stats->rx_packets++;
519 stats->rx_bytes += len;
520 rx++;
521 }
522
523 buf = netdev_alloc_frag(priv->rx_buf_size);
524 if (!buf)
525 goto done;
526 phys = dma_map_single(&ndev->dev, buf,
527 RX_BUF_SIZE, DMA_FROM_DEVICE);
528 if (dma_mapping_error(&ndev->dev, phys))
529 goto done;
530 priv->rx_buf[priv->rx_head] = buf;
531 priv->rx_phys[priv->rx_head] = phys;
532 hip04_set_recv_desc(priv, phys);
533
534 priv->rx_head = RX_NEXT(priv->rx_head);
535 if (rx >= budget)
536 goto done;
537
538 if (--cnt == 0)
539 cnt = hip04_recv_cnt(priv);
540 }
541
542 if (!(priv->reg_inten & RCV_INT)) {
543 /* enable rx interrupt */
544 priv->reg_inten |= RCV_INT;
545 writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
546 }
547 napi_complete(napi);
548done:
549 /* clean up tx descriptors and start a new timer if necessary */
550 tx_remaining = hip04_tx_reclaim(ndev, false);
551 if (rx < budget && tx_remaining)
552 hrtimer_start_expires(&priv->tx_coalesce_timer, HRTIMER_MODE_REL);
553
554 return rx;
555}
556
557static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
558{
559 struct net_device *ndev = (struct net_device *)dev_id;
560 struct hip04_priv *priv = netdev_priv(ndev);
561 struct net_device_stats *stats = &ndev->stats;
562 u32 ists = readl_relaxed(priv->base + PPE_INTSTS);
563
564 if (!ists)
565 return IRQ_NONE;
566
567 writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);
568
569 if (unlikely(ists & DEF_INT_ERR)) {
570 if (ists & (RCV_NOBUF | RCV_DROP)) {
571 stats->rx_errors++;
572 stats->rx_dropped++;
573 netdev_err(ndev, "rx drop\n");
574 }
575 if (ists & TX_DROP) {
576 stats->tx_dropped++;
577 netdev_err(ndev, "tx drop\n");
578 }
579 }
580
581 if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
582 /* disable rx interrupt */
583 priv->reg_inten &= ~(RCV_INT);
584 writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
585 hrtimer_cancel(&priv->tx_coalesce_timer);
586 __napi_schedule(&priv->napi);
587 }
588
589 return IRQ_HANDLED;
590}
591
592enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
593{
594 struct hip04_priv *priv;
595
596 priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);
597
598 if (napi_schedule_prep(&priv->napi)) {
599 /* disable rx interrupt */
600 priv->reg_inten &= ~(RCV_INT);
601 writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
602 __napi_schedule(&priv->napi);
603 }
604
605 return HRTIMER_NORESTART;
606}
607
608static void hip04_adjust_link(struct net_device *ndev)
609{
610 struct hip04_priv *priv = netdev_priv(ndev);
611 struct phy_device *phy = priv->phy;
612
613 if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
614 hip04_config_port(ndev, phy->speed, phy->duplex);
615 phy_print_status(phy);
616 }
617}
618
619static int hip04_mac_open(struct net_device *ndev)
620{
621 struct hip04_priv *priv = netdev_priv(ndev);
622 int i;
623
624 priv->rx_head = 0;
625 priv->tx_head = 0;
626 priv->tx_tail = 0;
627 hip04_reset_ppe(priv);
628
629 for (i = 0; i < RX_DESC_NUM; i++) {
630 dma_addr_t phys;
631
632 phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
633 RX_BUF_SIZE, DMA_FROM_DEVICE);
634 if (dma_mapping_error(&ndev->dev, phys))
635 return -EIO;
636
637 priv->rx_phys[i] = phys;
638 hip04_set_recv_desc(priv, phys);
639 }
640
641 if (priv->phy)
642 phy_start(priv->phy);
643
644 netdev_reset_queue(ndev);
645 netif_start_queue(ndev);
646 hip04_mac_enable(ndev);
647 napi_enable(&priv->napi);
648
649 return 0;
650}
651
652static int hip04_mac_stop(struct net_device *ndev)
653{
654 struct hip04_priv *priv = netdev_priv(ndev);
655 int i;
656
657 napi_disable(&priv->napi);
658 netif_stop_queue(ndev);
659 hip04_mac_disable(ndev);
660 hip04_tx_reclaim(ndev, true);
661 hip04_reset_ppe(priv);
662
663 if (priv->phy)
664 phy_stop(priv->phy);
665
666 for (i = 0; i < RX_DESC_NUM; i++) {
667 if (priv->rx_phys[i]) {
668 dma_unmap_single(&ndev->dev, priv->rx_phys[i],
669 RX_BUF_SIZE, DMA_FROM_DEVICE);
670 priv->rx_phys[i] = 0;
671 }
672 }
673
674 return 0;
675}
676
677static void hip04_timeout(struct net_device *ndev)
678{
679 struct hip04_priv *priv = netdev_priv(ndev);
680
681 schedule_work(&priv->tx_timeout_task);
682}
683
684static void hip04_tx_timeout_task(struct work_struct *work)
685{
686 struct hip04_priv *priv;
687
688 priv = container_of(work, struct hip04_priv, tx_timeout_task);
689 hip04_mac_stop(priv->ndev);
690 hip04_mac_open(priv->ndev);
691}
692
693static struct net_device_stats *hip04_get_stats(struct net_device *ndev)
694{
695 return &ndev->stats;
696}
697
698static int hip04_get_coalesce(struct net_device *netdev,
699 struct ethtool_coalesce *ec)
700{
701 struct hip04_priv *priv = netdev_priv(netdev);
702
703 ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
704 ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;
705
706 return 0;
707}
708
709static int hip04_set_coalesce(struct net_device *netdev,
710 struct ethtool_coalesce *ec)
711{
712 struct hip04_priv *priv = netdev_priv(netdev);
713
714 /* Check not supported parameters */
715 if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
716 (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
717 (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
718 (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
719 (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
720 (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
721 (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
722 (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
723 (ec->tx_max_coalesced_frames_irq) ||
724 (ec->stats_block_coalesce_usecs) ||
725 (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
726 return -EOPNOTSUPP;
727
728 if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
729 ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
730 (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
731 ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
732 return -EINVAL;
733
734 priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
735 priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;
736
737 return 0;
738}
739
740static void hip04_get_drvinfo(struct net_device *netdev,
741 struct ethtool_drvinfo *drvinfo)
742{
743 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
744 strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
745}
746
747static struct ethtool_ops hip04_ethtool_ops = {
748 .get_coalesce = hip04_get_coalesce,
749 .set_coalesce = hip04_set_coalesce,
750 .get_drvinfo = hip04_get_drvinfo,
751};
752
753static struct net_device_ops hip04_netdev_ops = {
754 .ndo_open = hip04_mac_open,
755 .ndo_stop = hip04_mac_stop,
756 .ndo_get_stats = hip04_get_stats,
757 .ndo_start_xmit = hip04_mac_start_xmit,
758 .ndo_set_mac_address = hip04_set_mac_address,
759 .ndo_tx_timeout = hip04_timeout,
760 .ndo_validate_addr = eth_validate_addr,
761 .ndo_change_mtu = eth_change_mtu,
762};
763
764static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
765{
766 struct hip04_priv *priv = netdev_priv(ndev);
767 int i;
768
769 priv->tx_desc = dma_alloc_coherent(d,
770 TX_DESC_NUM * sizeof(struct tx_desc),
771 &priv->tx_desc_dma, GFP_KERNEL);
772 if (!priv->tx_desc)
773 return -ENOMEM;
774
775 priv->rx_buf_size = RX_BUF_SIZE +
776 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
777 for (i = 0; i < RX_DESC_NUM; i++) {
778 priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
779 if (!priv->rx_buf[i])
780 return -ENOMEM;
781 }
782
783 return 0;
784}
785
786static void hip04_free_ring(struct net_device *ndev, struct device *d)
787{
788 struct hip04_priv *priv = netdev_priv(ndev);
789 int i;
790
791 for (i = 0; i < RX_DESC_NUM; i++)
792 if (priv->rx_buf[i])
793 put_page(virt_to_head_page(priv->rx_buf[i]));
794
795 for (i = 0; i < TX_DESC_NUM; i++)
796 if (priv->tx_skb[i])
797 dev_kfree_skb_any(priv->tx_skb[i]);
798
799 dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
800 priv->tx_desc, priv->tx_desc_dma);
801}
802
803static int hip04_mac_probe(struct platform_device *pdev)
804{
805 struct device *d = &pdev->dev;
806 struct device_node *node = d->of_node;
807 struct of_phandle_args arg;
808 struct net_device *ndev;
809 struct hip04_priv *priv;
810 struct resource *res;
811 unsigned int irq;
812 ktime_t txtime;
813 int ret;
814
815 ndev = alloc_etherdev(sizeof(struct hip04_priv));
816 if (!ndev)
817 return -ENOMEM;
818
819 priv = netdev_priv(ndev);
820 priv->ndev = ndev;
821 platform_set_drvdata(pdev, ndev);
822
823 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
824 priv->base = devm_ioremap_resource(d, res);
825 if (IS_ERR(priv->base)) {
826 ret = PTR_ERR(priv->base);
827 goto init_fail;
828 }
829
830 ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg);
831 if (ret < 0) {
832 dev_warn(d, "no port-handle\n");
833 goto init_fail;
834 }
835
836 priv->port = arg.args[0];
837 priv->chan = arg.args[1] * RX_DESC_NUM;
838
839 hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
840
841 /* BQL will try to keep the TX queue as short as possible, but it can't
842 * be faster than tx_coalesce_usecs, so we need a fast timeout here,
843 * but also long enough to gather up enough frames to ensure we don't
844 * get more interrupts than necessary.
845 * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
846 */
847 priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
848 priv->tx_coalesce_usecs = 200;
849 /* allow timer to fire after half the time at the earliest */
850 txtime = ktime_set(0, priv->tx_coalesce_usecs * NSEC_PER_USEC / 2);
851 hrtimer_set_expires_range(&priv->tx_coalesce_timer, txtime, txtime);
852 priv->tx_coalesce_timer.function = tx_done;
853
854 priv->map = syscon_node_to_regmap(arg.np);
855 if (IS_ERR(priv->map)) {
856 dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
857 ret = PTR_ERR(priv->map);
858 goto init_fail;
859 }
860
861 priv->phy_mode = of_get_phy_mode(node);
862 if (priv->phy_mode < 0) {
863 dev_warn(d, "not find phy-mode\n");
864 ret = -EINVAL;
865 goto init_fail;
866 }
867
868 irq = platform_get_irq(pdev, 0);
869 if (irq <= 0) {
870 ret = -EINVAL;
871 goto init_fail;
872 }
873
874 ret = devm_request_irq(d, irq, hip04_mac_interrupt,
875 0, pdev->name, ndev);
876 if (ret) {
877 netdev_err(ndev, "devm_request_irq failed\n");
878 goto init_fail;
879 }
880
881 priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
882 if (priv->phy_node) {
883 priv->phy = of_phy_connect(ndev, priv->phy_node,
884 &hip04_adjust_link,
885 0, priv->phy_mode);
886 if (!priv->phy) {
887 ret = -EPROBE_DEFER;
888 goto init_fail;
889 }
890 }
891
892 INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);
893
894 ether_setup(ndev);
895 ndev->netdev_ops = &hip04_netdev_ops;
896 ndev->ethtool_ops = &hip04_ethtool_ops;
897 ndev->watchdog_timeo = TX_TIMEOUT;
898 ndev->priv_flags |= IFF_UNICAST_FLT;
899 ndev->irq = irq;
900 netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
901 SET_NETDEV_DEV(ndev, &pdev->dev);
902
903 hip04_reset_ppe(priv);
904 if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
905 hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
906
907 hip04_config_fifo(priv);
908 random_ether_addr(ndev->dev_addr);
909 hip04_update_mac_address(ndev);
910
911 ret = hip04_alloc_ring(ndev, d);
912 if (ret) {
913 netdev_err(ndev, "alloc ring fail\n");
914 goto alloc_fail;
915 }
916
917 ret = register_netdev(ndev);
918 if (ret) {
919 free_netdev(ndev);
920 goto alloc_fail;
921 }
922
923 return 0;
924
925alloc_fail:
926 hip04_free_ring(ndev, d);
927init_fail:
928 of_node_put(priv->phy_node);
929 free_netdev(ndev);
930 return ret;
931}
932
933static int hip04_remove(struct platform_device *pdev)
934{
935 struct net_device *ndev = platform_get_drvdata(pdev);
936 struct hip04_priv *priv = netdev_priv(ndev);
937 struct device *d = &pdev->dev;
938
939 if (priv->phy)
940 phy_disconnect(priv->phy);
941
942 hip04_free_ring(ndev, d);
943 unregister_netdev(ndev);
944 free_irq(ndev->irq, ndev);
945 of_node_put(priv->phy_node);
946 cancel_work_sync(&priv->tx_timeout_task);
947 free_netdev(ndev);
948
949 return 0;
950}
951
952static const struct of_device_id hip04_mac_match[] = {
953 { .compatible = "hisilicon,hip04-mac" },
954 { }
955};
956
957MODULE_DEVICE_TABLE(of, hip04_mac_match);
958
959static struct platform_driver hip04_mac_driver = {
960 .probe = hip04_mac_probe,
961 .remove = hip04_remove,
962 .driver = {
963 .name = DRV_NAME,
964 .owner = THIS_MODULE,
965 .of_match_table = hip04_mac_match,
966 },
967};
968module_platform_driver(hip04_mac_driver);
969
970MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
971MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c
new file mode 100644
index 000000000000..b3bac25db99c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hip04_mdio.c
@@ -0,0 +1,186 @@
1/* Copyright (c) 2014 Linaro Ltd.
2 * Copyright (c) 2014 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/module.h>
11#include <linux/platform_device.h>
12#include <linux/io.h>
13#include <linux/of_mdio.h>
14#include <linux/delay.h>
15
16#define MDIO_CMD_REG 0x0
17#define MDIO_ADDR_REG 0x4
18#define MDIO_WDATA_REG 0x8
19#define MDIO_RDATA_REG 0xc
20#define MDIO_STA_REG 0x10
21
22#define MDIO_START BIT(14)
23#define MDIO_R_VALID BIT(1)
24#define MDIO_READ (BIT(12) | BIT(11) | MDIO_START)
25#define MDIO_WRITE (BIT(12) | BIT(10) | MDIO_START)
26
27struct hip04_mdio_priv {
28 void __iomem *base;
29};
30
31#define WAIT_TIMEOUT 10
32static int hip04_mdio_wait_ready(struct mii_bus *bus)
33{
34 struct hip04_mdio_priv *priv = bus->priv;
35 int i;
36
37 for (i = 0; readl_relaxed(priv->base + MDIO_CMD_REG) & MDIO_START; i++) {
38 if (i == WAIT_TIMEOUT)
39 return -ETIMEDOUT;
40 msleep(20);
41 }
42
43 return 0;
44}
45
46static int hip04_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
47{
48 struct hip04_mdio_priv *priv = bus->priv;
49 u32 val;
50 int ret;
51
52 ret = hip04_mdio_wait_ready(bus);
53 if (ret < 0)
54 goto out;
55
56 val = regnum | (mii_id << 5) | MDIO_READ;
57 writel_relaxed(val, priv->base + MDIO_CMD_REG);
58
59 ret = hip04_mdio_wait_ready(bus);
60 if (ret < 0)
61 goto out;
62
63 val = readl_relaxed(priv->base + MDIO_STA_REG);
64 if (val & MDIO_R_VALID) {
65 dev_err(bus->parent, "SMI bus read not valid\n");
66 ret = -ENODEV;
67 goto out;
68 }
69
70 val = readl_relaxed(priv->base + MDIO_RDATA_REG);
71 ret = val & 0xFFFF;
72out:
73 return ret;
74}
75
76static int hip04_mdio_write(struct mii_bus *bus, int mii_id,
77 int regnum, u16 value)
78{
79 struct hip04_mdio_priv *priv = bus->priv;
80 u32 val;
81 int ret;
82
83 ret = hip04_mdio_wait_ready(bus);
84 if (ret < 0)
85 goto out;
86
87 writel_relaxed(value, priv->base + MDIO_WDATA_REG);
88 val = regnum | (mii_id << 5) | MDIO_WRITE;
89 writel_relaxed(val, priv->base + MDIO_CMD_REG);
90out:
91 return ret;
92}
93
94static int hip04_mdio_reset(struct mii_bus *bus)
95{
96 int temp, i;
97
98 for (i = 0; i < PHY_MAX_ADDR; i++) {
99 hip04_mdio_write(bus, i, 22, 0);
100 temp = hip04_mdio_read(bus, i, MII_BMCR);
101 if (temp < 0)
102 continue;
103
104 temp |= BMCR_RESET;
105 if (hip04_mdio_write(bus, i, MII_BMCR, temp) < 0)
106 continue;
107 }
108
109 mdelay(500);
110 return 0;
111}
112
113static int hip04_mdio_probe(struct platform_device *pdev)
114{
115 struct resource *r;
116 struct mii_bus *bus;
117 struct hip04_mdio_priv *priv;
118 int ret;
119
120 bus = mdiobus_alloc_size(sizeof(struct hip04_mdio_priv));
121 if (!bus) {
122 dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
123 return -ENOMEM;
124 }
125
126 bus->name = "hip04_mdio_bus";
127 bus->read = hip04_mdio_read;
128 bus->write = hip04_mdio_write;
129 bus->reset = hip04_mdio_reset;
130 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
131 bus->parent = &pdev->dev;
132 priv = bus->priv;
133
134 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
135 priv->base = devm_ioremap_resource(&pdev->dev, r);
136 if (IS_ERR(priv->base)) {
137 ret = PTR_ERR(priv->base);
138 goto out_mdio;
139 }
140
141 ret = of_mdiobus_register(bus, pdev->dev.of_node);
142 if (ret < 0) {
143 dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
144 goto out_mdio;
145 }
146
147 platform_set_drvdata(pdev, bus);
148
149 return 0;
150
151out_mdio:
152 mdiobus_free(bus);
153 return ret;
154}
155
/* Platform remove: unregister the bus, then release its allocation. */
static int hip04_mdio_remove(struct platform_device *pdev)
{
	struct mii_bus *bus = platform_get_drvdata(pdev);

	mdiobus_unregister(bus);
	mdiobus_free(bus);

	return 0;
}
165
166static const struct of_device_id hip04_mdio_match[] = {
167 { .compatible = "hisilicon,hip04-mdio" },
168 { }
169};
170MODULE_DEVICE_TABLE(of, hip04_mdio_match);
171
172static struct platform_driver hip04_mdio_driver = {
173 .probe = hip04_mdio_probe,
174 .remove = hip04_mdio_remove,
175 .driver = {
176 .name = "hip04-mdio",
177 .owner = THIS_MODULE,
178 .of_match_table = hip04_mdio_match,
179 },
180};
181
182module_platform_driver(hip04_mdio_driver);
183
184MODULE_DESCRIPTION("HISILICON P04 MDIO interface driver");
185MODULE_LICENSE("GPL v2");
186MODULE_ALIAS("platform:hip04-mdio");
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 566b17db135a..e8a1adb7a962 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2064,9 +2064,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2064 memset(swqe, 0, SWQE_HEADER_SIZE); 2064 memset(swqe, 0, SWQE_HEADER_SIZE);
2065 atomic_dec(&pr->swqe_avail); 2065 atomic_dec(&pr->swqe_avail);
2066 2066
2067 if (vlan_tx_tag_present(skb)) { 2067 if (skb_vlan_tag_present(skb)) {
2068 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT; 2068 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2069 swqe->vlan_tag = vlan_tx_tag_get(skb); 2069 swqe->vlan_tag = skb_vlan_tag_get(skb);
2070 } 2070 }
2071 2071
2072 pr->tx_packets++; 2072 pr->tx_packets++;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 9388a83818f2..162762d1a12c 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2367,7 +2367,7 @@ static int emac_wait_deps(struct emac_instance *dev)
2367 err = emac_check_deps(dev, deps) ? 0 : -ENODEV; 2367 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2368 for (i = 0; i < EMAC_DEP_COUNT; i++) { 2368 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2369 of_node_put(deps[i].node); 2369 of_node_put(deps[i].node);
2370 if (err && deps[i].ofdev) 2370 if (err)
2371 of_dev_put(deps[i].ofdev); 2371 of_dev_put(deps[i].ofdev);
2372 } 2372 }
2373 if (err == 0) { 2373 if (err == 0) {
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 4d61ef50b465..f4ff465584a0 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -192,6 +192,17 @@ config IXGBE
192 To compile this driver as a module, choose M here. The module 192 To compile this driver as a module, choose M here. The module
193 will be called ixgbe. 193 will be called ixgbe.
194 194
195config IXGBE_VXLAN
196 bool "Virtual eXtensible Local Area Network Support"
197 default n
198 depends on IXGBE && VXLAN && !(IXGBE=y && VXLAN=m)
199 ---help---
200 This allows one to create VXLAN virtual interfaces that provide
201 Layer 2 Networks over Layer 3 Networks. VXLAN is often used
202 to tunnel virtual network infrastructure in virtualized environments.
203 Say Y here if you want to use Virtual eXtensible Local Area Network
204 (VXLAN) in the driver.
205
195config IXGBE_HWMON 206config IXGBE_HWMON
196 bool "Intel(R) 10GbE PCI Express adapters HWMON support" 207 bool "Intel(R) 10GbE PCI Express adapters HWMON support"
197 default y 208 default y
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index b691eb4f6376..4270ad2d4ddf 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -24,6 +24,7 @@
24/* ethtool support for e1000 */ 24/* ethtool support for e1000 */
25 25
26#include "e1000.h" 26#include "e1000.h"
27#include <linux/jiffies.h>
27#include <linux/uaccess.h> 28#include <linux/uaccess.h>
28 29
29enum {NETDEV_STATS, E1000_STATS}; 30enum {NETDEV_STATS, E1000_STATS};
@@ -1460,7 +1461,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1460 ret_val = 13; /* ret_val is the same as mis-compare */ 1461 ret_val = 13; /* ret_val is the same as mis-compare */
1461 break; 1462 break;
1462 } 1463 }
1463 if (jiffies >= (time + 2)) { 1464 if (time_after_eq(jiffies, time + 2)) {
1464 ret_val = 14; /* error code for time out error */ 1465 ret_val = 14; /* error code for time out error */
1465 break; 1466 break;
1466 } 1467 }
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 83140cbb5f01..7f997d36948f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2977,7 +2977,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
2977 struct e1000_tx_ring *tx_ring, int tx_flags, 2977 struct e1000_tx_ring *tx_ring, int tx_flags,
2978 int count) 2978 int count)
2979{ 2979{
2980 struct e1000_hw *hw = &adapter->hw;
2981 struct e1000_tx_desc *tx_desc = NULL; 2980 struct e1000_tx_desc *tx_desc = NULL;
2982 struct e1000_tx_buffer *buffer_info; 2981 struct e1000_tx_buffer *buffer_info;
2983 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2982 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -3031,11 +3030,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
3031 wmb(); 3030 wmb();
3032 3031
3033 tx_ring->next_to_use = i; 3032 tx_ring->next_to_use = i;
3034 writel(i, hw->hw_addr + tx_ring->tdt);
3035 /* we need this if more than one processor can write to our tail
3036 * at a time, it synchronizes IO on IA64/Altix systems
3037 */
3038 mmiowb();
3039} 3033}
3040 3034
3041/* 82547 workaround to avoid controller hang in half-duplex environment. 3035/* 82547 workaround to avoid controller hang in half-duplex environment.
@@ -3226,9 +3220,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3226 return NETDEV_TX_BUSY; 3220 return NETDEV_TX_BUSY;
3227 } 3221 }
3228 3222
3229 if (vlan_tx_tag_present(skb)) { 3223 if (skb_vlan_tag_present(skb)) {
3230 tx_flags |= E1000_TX_FLAGS_VLAN; 3224 tx_flags |= E1000_TX_FLAGS_VLAN;
3231 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 3225 tx_flags |= (skb_vlan_tag_get(skb) <<
3226 E1000_TX_FLAGS_VLAN_SHIFT);
3232 } 3227 }
3233 3228
3234 first = tx_ring->next_to_use; 3229 first = tx_ring->next_to_use;
@@ -3263,6 +3258,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3263 /* Make sure there is space in the ring for the next send. */ 3258 /* Make sure there is space in the ring for the next send. */
3264 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3259 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3265 3260
3261 if (!skb->xmit_more ||
3262 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3263 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3264 /* we need this if more than one processor can write to
3265 * our tail at a time, it synchronizes IO on IA64/Altix
3266 * systems
3267 */
3268 mmiowb();
3269 }
3266 } else { 3270 } else {
3267 dev_kfree_skb_any(skb); 3271 dev_kfree_skb_any(skb);
3268 tx_ring->buffer_info[first].time_stamp = 0; 3272 tx_ring->buffer_info[first].time_stamp = 0;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 7785240a0da1..9416e5a7e0c8 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -34,7 +34,7 @@
34#include <linux/pci-aspm.h> 34#include <linux/pci-aspm.h>
35#include <linux/crc32.h> 35#include <linux/crc32.h>
36#include <linux/if_vlan.h> 36#include <linux/if_vlan.h>
37#include <linux/clocksource.h> 37#include <linux/timecounter.h>
38#include <linux/net_tstamp.h> 38#include <linux/net_tstamp.h>
39#include <linux/ptp_clock_kernel.h> 39#include <linux/ptp_clock_kernel.h>
40#include <linux/ptp_classify.h> 40#include <linux/ptp_classify.h>
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e14fd85f64eb..1e8c40fd5c3d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4189,7 +4189,7 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
4189 /* Setup hardware time stamping cyclecounter */ 4189 /* Setup hardware time stamping cyclecounter */
4190 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { 4190 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
4191 adapter->cc.read = e1000e_cyclecounter_read; 4191 adapter->cc.read = e1000e_cyclecounter_read;
4192 adapter->cc.mask = CLOCKSOURCE_MASK(64); 4192 adapter->cc.mask = CYCLECOUNTER_MASK(64);
4193 adapter->cc.mult = 1; 4193 adapter->cc.mult = 1;
4194 /* cc.shift set in e1000e_get_base_tininca() */ 4194 /* cc.shift set in e1000e_get_base_tininca() */
4195 4195
@@ -5444,16 +5444,6 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5444 wmb(); 5444 wmb();
5445 5445
5446 tx_ring->next_to_use = i; 5446 tx_ring->next_to_use = i;
5447
5448 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5449 e1000e_update_tdt_wa(tx_ring, i);
5450 else
5451 writel(i, tx_ring->tail);
5452
5453 /* we need this if more than one processor can write to our tail
5454 * at a time, it synchronizes IO on IA64/Altix systems
5455 */
5456 mmiowb();
5457} 5447}
5458 5448
5459#define MINIMUM_DHCP_PACKET_SIZE 282 5449#define MINIMUM_DHCP_PACKET_SIZE 282
@@ -5463,8 +5453,8 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5463 struct e1000_hw *hw = &adapter->hw; 5453 struct e1000_hw *hw = &adapter->hw;
5464 u16 length, offset; 5454 u16 length, offset;
5465 5455
5466 if (vlan_tx_tag_present(skb) && 5456 if (skb_vlan_tag_present(skb) &&
5467 !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 5457 !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
5468 (adapter->hw.mng_cookie.status & 5458 (adapter->hw.mng_cookie.status &
5469 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) 5459 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
5470 return 0; 5460 return 0;
@@ -5603,9 +5593,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5603 if (e1000_maybe_stop_tx(tx_ring, count + 2)) 5593 if (e1000_maybe_stop_tx(tx_ring, count + 2))
5604 return NETDEV_TX_BUSY; 5594 return NETDEV_TX_BUSY;
5605 5595
5606 if (vlan_tx_tag_present(skb)) { 5596 if (skb_vlan_tag_present(skb)) {
5607 tx_flags |= E1000_TX_FLAGS_VLAN; 5597 tx_flags |= E1000_TX_FLAGS_VLAN;
5608 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 5598 tx_flags |= (skb_vlan_tag_get(skb) <<
5599 E1000_TX_FLAGS_VLAN_SHIFT);
5609 } 5600 }
5610 5601
5611 first = tx_ring->next_to_use; 5602 first = tx_ring->next_to_use;
@@ -5635,8 +5626,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5635 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, 5626 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5636 nr_frags); 5627 nr_frags);
5637 if (count) { 5628 if (count) {
5638 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 5629 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
5639 !adapter->tx_hwtstamp_skb)) { 5630 (adapter->flags & FLAG_HAS_HW_TIMESTAMP) &&
5631 !adapter->tx_hwtstamp_skb) {
5640 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 5632 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5641 tx_flags |= E1000_TX_FLAGS_HWTSTAMP; 5633 tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
5642 adapter->tx_hwtstamp_skb = skb_get(skb); 5634 adapter->tx_hwtstamp_skb = skb_get(skb);
@@ -5653,6 +5645,21 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5653 (MAX_SKB_FRAGS * 5645 (MAX_SKB_FRAGS *
5654 DIV_ROUND_UP(PAGE_SIZE, 5646 DIV_ROUND_UP(PAGE_SIZE,
5655 adapter->tx_fifo_limit) + 2)); 5647 adapter->tx_fifo_limit) + 2));
5648
5649 if (!skb->xmit_more ||
5650 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
5651 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5652 e1000e_update_tdt_wa(tx_ring,
5653 tx_ring->next_to_use);
5654 else
5655 writel(tx_ring->next_to_use, tx_ring->tail);
5656
5657 /* we need this if more than one processor can write
5658 * to our tail at a time, it synchronizes IO on
5659 *IA64/Altix systems
5660 */
5661 mmiowb();
5662 }
5656 } else { 5663 } else {
5657 dev_kfree_skb_any(skb); 5664 dev_kfree_skb_any(skb);
5658 tx_ring->buffer_info[first].time_stamp = 0; 5665 tx_ring->buffer_info[first].time_stamp = 0;
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index fb1a914a3ad4..978ef9c4a043 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -90,12 +90,9 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
90 struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, 90 struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
91 ptp_clock_info); 91 ptp_clock_info);
92 unsigned long flags; 92 unsigned long flags;
93 s64 now;
94 93
95 spin_lock_irqsave(&adapter->systim_lock, flags); 94 spin_lock_irqsave(&adapter->systim_lock, flags);
96 now = timecounter_read(&adapter->tc); 95 timecounter_adjtime(&adapter->tc, delta);
97 now += delta;
98 timecounter_init(&adapter->tc, &adapter->cc, now);
99 spin_unlock_irqrestore(&adapter->systim_lock, flags); 96 spin_unlock_irqrestore(&adapter->systim_lock, flags);
100 97
101 return 0; 98 return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index eb088b129bc7..84ab9eea2768 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -97,7 +97,6 @@ static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
97 */ 97 */
98 if (dma_mapping_error(rx_ring->dev, dma)) { 98 if (dma_mapping_error(rx_ring->dev, dma)) {
99 __free_page(page); 99 __free_page(page);
100 bi->page = NULL;
101 100
102 rx_ring->rx_stats.alloc_failed++; 101 rx_ring->rx_stats.alloc_failed++;
103 return false; 102 return false;
@@ -147,8 +146,8 @@ void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
147 i -= rx_ring->count; 146 i -= rx_ring->count;
148 } 147 }
149 148
150 /* clear the hdr_addr for the next_to_use descriptor */ 149 /* clear the status bits for the next_to_use descriptor */
151 rx_desc->q.hdr_addr = 0; 150 rx_desc->d.staterr = 0;
152 151
153 cleaned_count--; 152 cleaned_count--;
154 } while (cleaned_count); 153 } while (cleaned_count);
@@ -194,7 +193,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
194 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 193 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
195 194
196 /* transfer page from old buffer to new buffer */ 195 /* transfer page from old buffer to new buffer */
197 memcpy(new_buff, old_buff, sizeof(struct fm10k_rx_buffer)); 196 *new_buff = *old_buff;
198 197
199 /* sync the buffer for use by the device */ 198 /* sync the buffer for use by the device */
200 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, 199 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
@@ -203,12 +202,17 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
203 DMA_FROM_DEVICE); 202 DMA_FROM_DEVICE);
204} 203}
205 204
205static inline bool fm10k_page_is_reserved(struct page *page)
206{
207 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
208}
209
206static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, 210static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
207 struct page *page, 211 struct page *page,
208 unsigned int truesize) 212 unsigned int truesize)
209{ 213{
210 /* avoid re-using remote pages */ 214 /* avoid re-using remote pages */
211 if (unlikely(page_to_nid(page) != numa_mem_id())) 215 if (unlikely(fm10k_page_is_reserved(page)))
212 return false; 216 return false;
213 217
214#if (PAGE_SIZE < 8192) 218#if (PAGE_SIZE < 8192)
@@ -218,22 +222,19 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
218 222
219 /* flip page offset to other buffer */ 223 /* flip page offset to other buffer */
220 rx_buffer->page_offset ^= FM10K_RX_BUFSZ; 224 rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
221
222 /* Even if we own the page, we are not allowed to use atomic_set()
223 * This would break get_page_unless_zero() users.
224 */
225 atomic_inc(&page->_count);
226#else 225#else
227 /* move offset up to the next cache line */ 226 /* move offset up to the next cache line */
228 rx_buffer->page_offset += truesize; 227 rx_buffer->page_offset += truesize;
229 228
230 if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ)) 229 if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
231 return false; 230 return false;
232
233 /* bump ref count on page before it is given to the stack */
234 get_page(page);
235#endif 231#endif
236 232
233 /* Even if we own the page, we are not allowed to use atomic_set()
234 * This would break get_page_unless_zero() users.
235 */
236 atomic_inc(&page->_count);
237
237 return true; 238 return true;
238} 239}
239 240
@@ -270,12 +271,12 @@ static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring,
270 271
271 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 272 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
272 273
273 /* we can reuse buffer as-is, just make sure it is local */ 274 /* page is not reserved, we can reuse buffer as-is */
274 if (likely(page_to_nid(page) == numa_mem_id())) 275 if (likely(!fm10k_page_is_reserved(page)))
275 return true; 276 return true;
276 277
277 /* this page cannot be reused so discard it */ 278 /* this page cannot be reused so discard it */
278 put_page(page); 279 __free_page(page);
279 return false; 280 return false;
280 } 281 }
281 282
@@ -293,7 +294,6 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
293 struct page *page; 294 struct page *page;
294 295
295 rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean]; 296 rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
296
297 page = rx_buffer->page; 297 page = rx_buffer->page;
298 prefetchw(page); 298 prefetchw(page);
299 299
@@ -727,6 +727,12 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
727 struct ethhdr *eth_hdr; 727 struct ethhdr *eth_hdr;
728 u8 l4_hdr = 0; 728 u8 l4_hdr = 0;
729 729
730/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */
731#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET 164
732 if (skb_inner_transport_header(skb) - skb_mac_header(skb) >
733 FM10K_MAX_ENCAP_TRANSPORT_OFFSET)
734 return 0;
735
730 switch (vlan_get_protocol(skb)) { 736 switch (vlan_get_protocol(skb)) {
731 case htons(ETH_P_IP): 737 case htons(ETH_P_IP):
732 l4_hdr = ip_hdr(skb)->protocol; 738 l4_hdr = ip_hdr(skb)->protocol;
@@ -965,8 +971,8 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
965 tx_desc = FM10K_TX_DESC(tx_ring, i); 971 tx_desc = FM10K_TX_DESC(tx_ring, i);
966 972
967 /* add HW VLAN tag */ 973 /* add HW VLAN tag */
968 if (vlan_tx_tag_present(skb)) 974 if (skb_vlan_tag_present(skb))
969 tx_desc->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 975 tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
970 else 976 else
971 tx_desc->vlan = 0; 977 tx_desc->vlan = 0;
972 978
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 14a4ea795c01..9f5457c9e627 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1194,12 +1194,11 @@ static s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw,
1194{ 1194{
1195 const enum fm10k_mbx_state state = mbx->state; 1195 const enum fm10k_mbx_state state = mbx->state;
1196 const u32 *hdr = &mbx->mbx_hdr; 1196 const u32 *hdr = &mbx->mbx_hdr;
1197 u16 head, tail; 1197 u16 head;
1198 s32 err; 1198 s32 err;
1199 1199
1200 /* we will need to pull all of the fields for verification */ 1200 /* we will need to pull the header field for verification */
1201 head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); 1201 head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD);
1202 tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL);
1203 1202
1204 /* We should not be receiving disconnect if Rx is incomplete */ 1203 /* We should not be receiving disconnect if Rx is incomplete */
1205 if (mbx->pushed) 1204 if (mbx->pushed)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 8811364b91cb..cfde8bac1aeb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -609,7 +609,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
609 int err; 609 int err;
610 610
611 if ((skb->protocol == htons(ETH_P_8021Q)) && 611 if ((skb->protocol == htons(ETH_P_8021Q)) &&
612 !vlan_tx_tag_present(skb)) { 612 !skb_vlan_tag_present(skb)) {
613 /* FM10K only supports hardware tagging, any tags in frame 613 /* FM10K only supports hardware tagging, any tags in frame
614 * are considered 2nd level or "outer" tags 614 * are considered 2nd level or "outer" tags
615 */ 615 */
@@ -1414,13 +1414,12 @@ struct net_device *fm10k_alloc_netdev(void)
1414 dev->vlan_features |= dev->features; 1414 dev->vlan_features |= dev->features;
1415 1415
1416 /* configure tunnel offloads */ 1416 /* configure tunnel offloads */
1417 dev->hw_enc_features = NETIF_F_IP_CSUM | 1417 dev->hw_enc_features |= NETIF_F_IP_CSUM |
1418 NETIF_F_TSO | 1418 NETIF_F_TSO |
1419 NETIF_F_TSO6 | 1419 NETIF_F_TSO6 |
1420 NETIF_F_TSO_ECN | 1420 NETIF_F_TSO_ECN |
1421 NETIF_F_GSO_UDP_TUNNEL | 1421 NETIF_F_GSO_UDP_TUNNEL |
1422 NETIF_F_IPV6_CSUM | 1422 NETIF_F_IPV6_CSUM;
1423 NETIF_F_SG;
1424 1423
1425 /* we want to leave these both on as we cannot disable VLAN tag 1424 /* we want to leave these both on as we cannot disable VLAN tag
1426 * insertion or stripping on the hardware since it is contained 1425 * insertion or stripping on the hardware since it is contained
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 275423d4f777..7e4711958e46 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -330,13 +330,10 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
330 struct fm10k_mac_update mac_update; 330 struct fm10k_mac_update mac_update;
331 u32 msg[5]; 331 u32 msg[5];
332 332
333 /* if glort is not valid return error */ 333 /* if glort or vlan are not valid return error */
334 if (!fm10k_glort_valid_pf(hw, glort)) 334 if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
335 return FM10K_ERR_PARAM; 335 return FM10K_ERR_PARAM;
336 336
337 /* drop upper 4 bits of VLAN ID */
338 vid = (vid << 4) >> 4;
339
340 /* record fields */ 337 /* record fields */
341 mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) | 338 mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) |
342 ((u32)mac[3] << 16) | 339 ((u32)mac[3] << 16) |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
index 7822809436a3..d966044e017a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
@@ -57,7 +57,6 @@ void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
57 struct sk_buff_head *list = &interface->ts_tx_skb_queue; 57 struct sk_buff_head *list = &interface->ts_tx_skb_queue;
58 struct sk_buff *clone; 58 struct sk_buff *clone;
59 unsigned long flags; 59 unsigned long flags;
60 __le16 dglort;
61 60
62 /* create clone for us to return on the Tx path */ 61 /* create clone for us to return on the Tx path */
63 clone = skb_clone_sk(skb); 62 clone = skb_clone_sk(skb);
@@ -65,8 +64,6 @@ void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
65 return; 64 return;
66 65
67 FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT; 66 FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT;
68 dglort = FM10K_CB(clone)->fi.w.dglort;
69
70 spin_lock_irqsave(&list->lock, flags); 67 spin_lock_irqsave(&list->lock, flags);
71 68
72 /* attempt to locate any buffers with the same dglort, 69 /* attempt to locate any buffers with the same dglort,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 280296f29154..7c6d9d5a8ae5 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -354,7 +354,7 @@ struct fm10k_hw;
354 354
355/* Define timeouts for resets and disables */ 355/* Define timeouts for resets and disables */
356#define FM10K_QUEUE_DISABLE_TIMEOUT 100 356#define FM10K_QUEUE_DISABLE_TIMEOUT 100
357#define FM10K_RESET_TIMEOUT 100 357#define FM10K_RESET_TIMEOUT 150
358 358
359/* VF registers */ 359/* VF registers */
360#define FM10K_VFCTRL 0x00000 360#define FM10K_VFCTRL 0x00000
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index fc50f6461b13..2b65cdcad6ba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -87,11 +87,12 @@
87#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */ 87#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
88#endif /* I40E_FCOE */ 88#endif /* I40E_FCOE */
89#define I40E_MAX_AQ_BUF_SIZE 4096 89#define I40E_MAX_AQ_BUF_SIZE 4096
90#define I40E_AQ_LEN 128 90#define I40E_AQ_LEN 256
91#define I40E_AQ_WORK_LIMIT 16 91#define I40E_AQ_WORK_LIMIT 32
92#define I40E_MAX_USER_PRIORITY 8 92#define I40E_MAX_USER_PRIORITY 8
93#define I40E_DEFAULT_MSG_ENABLE 4 93#define I40E_DEFAULT_MSG_ENABLE 4
94#define I40E_QUEUE_WAIT_RETRY_LIMIT 10 94#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
95#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9)
95 96
96#define I40E_NVM_VERSION_LO_SHIFT 0 97#define I40E_NVM_VERSION_LO_SHIFT 0
97#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) 98#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -147,6 +148,7 @@ enum i40e_state_t {
147 __I40E_FD_FLUSH_REQUESTED, 148 __I40E_FD_FLUSH_REQUESTED,
148 __I40E_RESET_FAILED, 149 __I40E_RESET_FAILED,
149 __I40E_PORT_TX_SUSPENDED, 150 __I40E_PORT_TX_SUSPENDED,
151 __I40E_VF_DISABLE,
150}; 152};
151 153
152enum i40e_interrupt_policy { 154enum i40e_interrupt_policy {
@@ -268,7 +270,7 @@ struct i40e_pf {
268 u16 rx_itr_default; 270 u16 rx_itr_default;
269 u16 tx_itr_default; 271 u16 tx_itr_default;
270 u16 msg_enable; 272 u16 msg_enable;
271 char misc_int_name[IFNAMSIZ + 9]; 273 char int_name[I40E_INT_NAME_STR_LEN];
272 u16 adminq_work_limit; /* num of admin receive queue desc to process */ 274 u16 adminq_work_limit; /* num of admin receive queue desc to process */
273 unsigned long service_timer_period; 275 unsigned long service_timer_period;
274 unsigned long service_timer_previous; 276 unsigned long service_timer_previous;
@@ -524,7 +526,7 @@ struct i40e_q_vector {
524 526
525 cpumask_t affinity_mask; 527 cpumask_t affinity_mask;
526 struct rcu_head rcu; /* to avoid race with update stats on free */ 528 struct rcu_head rcu; /* to avoid race with update stats on free */
527 char name[IFNAMSIZ + 9]; 529 char name[I40E_INT_NAME_STR_LEN];
528} ____cacheline_internodealigned_in_smp; 530} ____cacheline_internodealigned_in_smp;
529 531
530/* lan device */ 532/* lan device */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 564d0b0192f7..de17b6fbcc4e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -148,7 +148,7 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
148 148
149/* general information */ 149/* general information */
150#define I40E_AQ_LARGE_BUF 512 150#define I40E_AQ_LARGE_BUF 512
151#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */ 151#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
152 152
153void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, 153void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
154 u16 opcode); 154 u16 opcode);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 8835aeeff23e..929e3d72a01e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -256,6 +256,8 @@ enum i40e_admin_queue_opc {
256 i40e_aqc_opc_lldp_stop = 0x0A05, 256 i40e_aqc_opc_lldp_stop = 0x0A05,
257 i40e_aqc_opc_lldp_start = 0x0A06, 257 i40e_aqc_opc_lldp_start = 0x0A06,
258 i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07, 258 i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
259 i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
260 i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
259 261
260 /* Tunnel commands */ 262 /* Tunnel commands */
261 i40e_aqc_opc_add_udp_tunnel = 0x0B00, 263 i40e_aqc_opc_add_udp_tunnel = 0x0B00,
@@ -268,6 +270,8 @@ enum i40e_admin_queue_opc {
268 /* OEM commands */ 270 /* OEM commands */
269 i40e_aqc_opc_oem_parameter_change = 0xFE00, 271 i40e_aqc_opc_oem_parameter_change = 0xFE00,
270 i40e_aqc_opc_oem_device_status_change = 0xFE01, 272 i40e_aqc_opc_oem_device_status_change = 0xFE01,
273 i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
274 i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
271 275
272 /* debug commands */ 276 /* debug commands */
273 i40e_aqc_opc_debug_get_deviceid = 0xFF00, 277 i40e_aqc_opc_debug_get_deviceid = 0xFF00,
@@ -276,7 +280,6 @@ enum i40e_admin_queue_opc {
276 i40e_aqc_opc_debug_write_reg = 0xFF04, 280 i40e_aqc_opc_debug_write_reg = 0xFF04,
277 i40e_aqc_opc_debug_modify_reg = 0xFF07, 281 i40e_aqc_opc_debug_modify_reg = 0xFF07,
278 i40e_aqc_opc_debug_dump_internals = 0xFF08, 282 i40e_aqc_opc_debug_dump_internals = 0xFF08,
279 i40e_aqc_opc_debug_modify_internals = 0xFF09,
280}; 283};
281 284
282/* command structures and indirect data structures */ 285/* command structures and indirect data structures */
@@ -410,6 +413,7 @@ struct i40e_aqc_list_capabilities_element_resp {
410#define I40E_AQ_CAP_ID_VSI 0x0017 413#define I40E_AQ_CAP_ID_VSI 0x0017
411#define I40E_AQ_CAP_ID_DCB 0x0018 414#define I40E_AQ_CAP_ID_DCB 0x0018
412#define I40E_AQ_CAP_ID_FCOE 0x0021 415#define I40E_AQ_CAP_ID_FCOE 0x0021
416#define I40E_AQ_CAP_ID_ISCSI 0x0022
413#define I40E_AQ_CAP_ID_RSS 0x0040 417#define I40E_AQ_CAP_ID_RSS 0x0040
414#define I40E_AQ_CAP_ID_RXQ 0x0041 418#define I40E_AQ_CAP_ID_RXQ 0x0041
415#define I40E_AQ_CAP_ID_TXQ 0x0042 419#define I40E_AQ_CAP_ID_TXQ 0x0042
@@ -454,8 +458,11 @@ struct i40e_aqc_arp_proxy_data {
454 __le32 pfpm_proxyfc; 458 __le32 pfpm_proxyfc;
455 __le32 ip_addr; 459 __le32 ip_addr;
456 u8 mac_addr[6]; 460 u8 mac_addr[6];
461 u8 reserved[2];
457}; 462};
458 463
464I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
465
459/* Set NS Proxy Table Entry Command (indirect 0x0105) */ 466/* Set NS Proxy Table Entry Command (indirect 0x0105) */
460struct i40e_aqc_ns_proxy_data { 467struct i40e_aqc_ns_proxy_data {
461 __le16 table_idx_mac_addr_0; 468 __le16 table_idx_mac_addr_0;
@@ -481,6 +488,8 @@ struct i40e_aqc_ns_proxy_data {
481 u8 ipv6_addr_1[16]; 488 u8 ipv6_addr_1[16];
482}; 489};
483 490
491I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
492
484/* Manage LAA Command (0x0106) - obsolete */ 493/* Manage LAA Command (0x0106) - obsolete */
485struct i40e_aqc_mng_laa { 494struct i40e_aqc_mng_laa {
486 __le16 command_flags; 495 __le16 command_flags;
@@ -491,6 +500,8 @@ struct i40e_aqc_mng_laa {
491 u8 reserved2[6]; 500 u8 reserved2[6];
492}; 501};
493 502
503I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
504
494/* Manage MAC Address Read Command (indirect 0x0107) */ 505/* Manage MAC Address Read Command (indirect 0x0107) */
495struct i40e_aqc_mac_address_read { 506struct i40e_aqc_mac_address_read {
496 __le16 command_flags; 507 __le16 command_flags;
@@ -562,6 +573,8 @@ struct i40e_aqc_get_switch_config_header_resp {
562 u8 reserved[12]; 573 u8 reserved[12];
563}; 574};
564 575
576I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
577
565struct i40e_aqc_switch_config_element_resp { 578struct i40e_aqc_switch_config_element_resp {
566 u8 element_type; 579 u8 element_type;
567#define I40E_AQ_SW_ELEM_TYPE_MAC 1 580#define I40E_AQ_SW_ELEM_TYPE_MAC 1
@@ -587,6 +600,8 @@ struct i40e_aqc_switch_config_element_resp {
587 __le16 element_info; 600 __le16 element_info;
588}; 601};
589 602
603I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
604
590/* Get Switch Configuration (indirect 0x0200) 605/* Get Switch Configuration (indirect 0x0200)
591 * an array of elements are returned in the response buffer 606 * an array of elements are returned in the response buffer
592 * the first in the array is the header, remainder are elements 607 * the first in the array is the header, remainder are elements
@@ -596,6 +611,8 @@ struct i40e_aqc_get_switch_config_resp {
596 struct i40e_aqc_switch_config_element_resp element[1]; 611 struct i40e_aqc_switch_config_element_resp element[1];
597}; 612};
598 613
614I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
615
599/* Add Statistics (direct 0x0201) 616/* Add Statistics (direct 0x0201)
600 * Remove Statistics (direct 0x0202) 617 * Remove Statistics (direct 0x0202)
601 */ 618 */
@@ -661,6 +678,8 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
661 u8 reserved2[6]; 678 u8 reserved2[6];
662}; 679};
663 680
681I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
682
664/* Add VSI (indirect 0x0210) 683/* Add VSI (indirect 0x0210)
665 * this indirect command uses struct i40e_aqc_vsi_properties_data 684 * this indirect command uses struct i40e_aqc_vsi_properties_data
666 * as the indirect buffer (128 bytes) 685 * as the indirect buffer (128 bytes)
@@ -1092,6 +1111,8 @@ struct i40e_aqc_remove_tag {
1092 u8 reserved[12]; 1111 u8 reserved[12];
1093}; 1112};
1094 1113
1114I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
1115
1095/* Add multicast E-Tag (direct 0x0257) 1116/* Add multicast E-Tag (direct 0x0257)
1096 * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields 1117 * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
1097 * and no external data 1118 * and no external data
@@ -1207,7 +1228,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
1207 } ipaddr; 1228 } ipaddr;
1208 __le16 flags; 1229 __le16 flags;
1209#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 1230#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
1210#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ 1231#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
1211 I40E_AQC_ADD_CLOUD_FILTER_SHIFT) 1232 I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
1212/* 0x0000 reserved */ 1233/* 0x0000 reserved */
1213#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 1234#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
@@ -1240,7 +1261,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
1240 u8 reserved[4]; 1261 u8 reserved[4];
1241 __le16 queue_number; 1262 __le16 queue_number;
1242#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 1263#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
1243#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ 1264#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
1244 I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) 1265 I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
1245 u8 reserved2[14]; 1266 u8 reserved2[14];
1246 /* response section */ 1267 /* response section */
@@ -1359,6 +1380,8 @@ struct i40e_aqc_configure_vsi_ets_sla_bw_data {
1359 u8 reserved1[28]; 1380 u8 reserved1[28];
1360}; 1381};
1361 1382
1383I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
1384
1362/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) 1385/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
1363 * responds with i40e_aqc_qs_handles_resp 1386 * responds with i40e_aqc_qs_handles_resp
1364 */ 1387 */
@@ -1370,6 +1393,8 @@ struct i40e_aqc_configure_vsi_tc_bw_data {
1370 __le16 qs_handles[8]; 1393 __le16 qs_handles[8];
1371}; 1394};
1372 1395
1396I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
1397
1373/* Query vsi bw configuration (indirect 0x0408) */ 1398/* Query vsi bw configuration (indirect 0x0408) */
1374struct i40e_aqc_query_vsi_bw_config_resp { 1399struct i40e_aqc_query_vsi_bw_config_resp {
1375 u8 tc_valid_bits; 1400 u8 tc_valid_bits;
@@ -1383,6 +1408,8 @@ struct i40e_aqc_query_vsi_bw_config_resp {
1383 u8 reserved3[23]; 1408 u8 reserved3[23];
1384}; 1409};
1385 1410
1411I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
1412
1386/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ 1413/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
1387struct i40e_aqc_query_vsi_ets_sla_config_resp { 1414struct i40e_aqc_query_vsi_ets_sla_config_resp {
1388 u8 tc_valid_bits; 1415 u8 tc_valid_bits;
@@ -1394,6 +1421,8 @@ struct i40e_aqc_query_vsi_ets_sla_config_resp {
1394 __le16 tc_bw_max[2]; 1421 __le16 tc_bw_max[2];
1395}; 1422};
1396 1423
1424I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
1425
1397/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ 1426/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
1398struct i40e_aqc_configure_switching_comp_bw_limit { 1427struct i40e_aqc_configure_switching_comp_bw_limit {
1399 __le16 seid; 1428 __le16 seid;
@@ -1421,6 +1450,8 @@ struct i40e_aqc_configure_switching_comp_ets_data {
1421 u8 reserved2[96]; 1450 u8 reserved2[96];
1422}; 1451};
1423 1452
1453I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
1454
1424/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ 1455/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
1425struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { 1456struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
1426 u8 tc_valid_bits; 1457 u8 tc_valid_bits;
@@ -1432,6 +1463,9 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
1432 u8 reserved1[28]; 1463 u8 reserved1[28];
1433}; 1464};
1434 1465
1466I40E_CHECK_STRUCT_LEN(0x40,
1467 i40e_aqc_configure_switching_comp_ets_bw_limit_data);
1468
1435/* Configure Switching Component Bandwidth Allocation per Tc 1469/* Configure Switching Component Bandwidth Allocation per Tc
1436 * (indirect 0x0417) 1470 * (indirect 0x0417)
1437 */ 1471 */
@@ -1443,6 +1477,8 @@ struct i40e_aqc_configure_switching_comp_bw_config_data {
1443 u8 reserved1[20]; 1477 u8 reserved1[20];
1444}; 1478};
1445 1479
1480I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
1481
1446/* Query Switching Component Configuration (indirect 0x0418) */ 1482/* Query Switching Component Configuration (indirect 0x0418) */
1447struct i40e_aqc_query_switching_comp_ets_config_resp { 1483struct i40e_aqc_query_switching_comp_ets_config_resp {
1448 u8 tc_valid_bits; 1484 u8 tc_valid_bits;
@@ -1453,6 +1489,8 @@ struct i40e_aqc_query_switching_comp_ets_config_resp {
1453 u8 reserved2[23]; 1489 u8 reserved2[23];
1454}; 1490};
1455 1491
1492I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
1493
1456/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ 1494/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
1457struct i40e_aqc_query_port_ets_config_resp { 1495struct i40e_aqc_query_port_ets_config_resp {
1458 u8 reserved[4]; 1496 u8 reserved[4];
@@ -1468,6 +1506,8 @@ struct i40e_aqc_query_port_ets_config_resp {
1468 u8 reserved3[32]; 1506 u8 reserved3[32];
1469}; 1507};
1470 1508
1509I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
1510
1471/* Query Switching Component Bandwidth Allocation per Traffic Type 1511/* Query Switching Component Bandwidth Allocation per Traffic Type
1472 * (indirect 0x041A) 1512 * (indirect 0x041A)
1473 */ 1513 */
@@ -1482,6 +1522,8 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
1482 __le16 tc_bw_max[2]; 1522 __le16 tc_bw_max[2];
1483}; 1523};
1484 1524
1525I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
1526
1485/* Suspend/resume port TX traffic 1527/* Suspend/resume port TX traffic
1486 * (direct 0x041B and 0x041C) uses the generic SEID struct 1528 * (direct 0x041B and 0x041C) uses the generic SEID struct
1487 */ 1529 */
@@ -1495,6 +1537,8 @@ struct i40e_aqc_configure_partition_bw_data {
1495 u8 max_bw[16]; /* bandwidth limit */ 1537 u8 max_bw[16]; /* bandwidth limit */
1496}; 1538};
1497 1539
1540I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
1541
1498/* Get and set the active HMC resource profile and status. 1542/* Get and set the active HMC resource profile and status.
1499 * (direct 0x0500) and (direct 0x0501) 1543 * (direct 0x0500) and (direct 0x0501)
1500 */ 1544 */
@@ -1577,6 +1621,8 @@ struct i40e_aqc_module_desc {
1577 u8 reserved2[8]; 1621 u8 reserved2[8];
1578}; 1622};
1579 1623
1624I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
1625
1580struct i40e_aq_get_phy_abilities_resp { 1626struct i40e_aq_get_phy_abilities_resp {
1581 __le32 phy_type; /* bitmap using the above enum for offsets */ 1627 __le32 phy_type; /* bitmap using the above enum for offsets */
1582 u8 link_speed; /* bitmap using the above enum bit patterns */ 1628 u8 link_speed; /* bitmap using the above enum bit patterns */
@@ -1605,6 +1651,8 @@ struct i40e_aq_get_phy_abilities_resp {
1605 struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; 1651 struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
1606}; 1652};
1607 1653
1654I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
1655
1608/* Set PHY Config (direct 0x0601) */ 1656/* Set PHY Config (direct 0x0601) */
1609struct i40e_aq_set_phy_config { /* same bits as above in all */ 1657struct i40e_aq_set_phy_config { /* same bits as above in all */
1610 __le32 phy_type; 1658 __le32 phy_type;
@@ -1788,12 +1836,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
1788/* NVM Config Read (indirect 0x0704) */ 1836/* NVM Config Read (indirect 0x0704) */
1789struct i40e_aqc_nvm_config_read { 1837struct i40e_aqc_nvm_config_read {
1790 __le16 cmd_flags; 1838 __le16 cmd_flags;
1791#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 1839#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
1792#define ANVM_READ_SINGLE_FEATURE 0 1840#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
1793#define ANVM_READ_MULTIPLE_FEATURES 1 1841#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
1794 __le16 element_count; 1842 __le16 element_count;
1795 __le16 element_id; /* Feature/field ID */ 1843 __le16 element_id; /* Feature/field ID */
1796 u8 reserved[2]; 1844 __le16 element_id_msw; /* MSWord of field ID */
1797 __le32 address_high; 1845 __le32 address_high;
1798 __le32 address_low; 1846 __le32 address_low;
1799}; 1847};
@@ -1811,21 +1859,32 @@ struct i40e_aqc_nvm_config_write {
1811 1859
1812I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); 1860I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
1813 1861
1862/* Used for 0x0704 as well as for 0x0705 commands */
1863#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
1864#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
1865 (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
1866#define I40E_AQ_ANVM_FEATURE 0
1867#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
1814struct i40e_aqc_nvm_config_data_feature { 1868struct i40e_aqc_nvm_config_data_feature {
1815 __le16 feature_id; 1869 __le16 feature_id;
1816 __le16 instance_id; 1870#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
1871#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
1872#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
1817 __le16 feature_options; 1873 __le16 feature_options;
1818 __le16 feature_selection; 1874 __le16 feature_selection;
1819}; 1875};
1820 1876
1877I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
1878
1821struct i40e_aqc_nvm_config_data_immediate_field { 1879struct i40e_aqc_nvm_config_data_immediate_field {
1822#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2 1880 __le32 field_id;
1823 __le16 field_id; 1881 __le32 field_value;
1824 __le16 instance_id;
1825 __le16 field_options; 1882 __le16 field_options;
1826 __le16 field_value; 1883 __le16 reserved;
1827}; 1884};
1828 1885
1886I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
1887
1829/* Send to PF command (indirect 0x0801) id is only used by PF 1888/* Send to PF command (indirect 0x0801) id is only used by PF
1830 * Send to VF command (indirect 0x0802) id is only used by PF 1889 * Send to VF command (indirect 0x0802) id is only used by PF
1831 * Send to Peer PF command (indirect 0x0803) 1890 * Send to Peer PF command (indirect 0x0803)
@@ -2026,12 +2085,54 @@ struct i40e_aqc_get_cee_dcb_cfg_resp {
2026 u8 oper_tc_bw[8]; 2085 u8 oper_tc_bw[8];
2027 u8 oper_pfc_en; 2086 u8 oper_pfc_en;
2028 __le16 oper_app_prio; 2087 __le16 oper_app_prio;
2088#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
2089#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
2090#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
2091#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
2092#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
2093#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
2094#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
2029 __le32 tlv_status; 2095 __le32 tlv_status;
2096#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
2097#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
2098#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
2099#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
2100#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
2101#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
2030 u8 reserved[12]; 2102 u8 reserved[12];
2031}; 2103};
2032 2104
2033I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); 2105I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
2034 2106
2107/* Set Local LLDP MIB (indirect 0x0A08)
2108 * Used to replace the local MIB of a given LLDP agent. e.g. DCBx
2109 */
2110struct i40e_aqc_lldp_set_local_mib {
2111#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
2112#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
2113 u8 type;
2114 u8 reserved0;
2115 __le16 length;
2116 u8 reserved1[4];
2117 __le32 address_high;
2118 __le32 address_low;
2119};
2120
2121I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
2122
2123/* Stop/Start LLDP Agent (direct 0x0A09)
2124 * Used for stopping/starting specific LLDP agent. e.g. DCBx
2125 */
2126struct i40e_aqc_lldp_stop_start_specific_agent {
2127#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
2128#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
2129 (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
2130 u8 command;
2131 u8 reserved[15];
2132};
2133
2134I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
2135
2035/* Add Udp Tunnel command and completion (direct 0x0B00) */ 2136/* Add Udp Tunnel command and completion (direct 0x0B00) */
2036struct i40e_aqc_add_udp_tunnel { 2137struct i40e_aqc_add_udp_tunnel {
2037 __le16 udp_port; 2138 __le16 udp_port;
@@ -2106,7 +2207,8 @@ struct i40e_aqc_oem_param_change {
2106#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 2207#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
2107#define I40E_AQ_OEM_PARAM_MAC 2 2208#define I40E_AQ_OEM_PARAM_MAC 2
2108 __le32 param_value1; 2209 __le32 param_value1;
2109 u8 param_value2[8]; 2210 __le16 param_value2;
2211 u8 reserved[6];
2110}; 2212};
2111 2213
2112I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); 2214I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
@@ -2120,6 +2222,28 @@ struct i40e_aqc_oem_state_change {
2120 2222
2121I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); 2223I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
2122 2224
2225/* Initialize OCSD (0xFE02, direct) */
2226struct i40e_aqc_opc_oem_ocsd_initialize {
2227 u8 type_status;
2228 u8 reserved1[3];
2229 __le32 ocsd_memory_block_addr_high;
2230 __le32 ocsd_memory_block_addr_low;
2231 __le32 requested_update_interval;
2232};
2233
2234I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
2235
2236/* Initialize OCBB (0xFE03, direct) */
2237struct i40e_aqc_opc_oem_ocbb_initialize {
2238 u8 type_status;
2239 u8 reserved1[3];
2240 __le32 ocbb_memory_block_addr_high;
2241 __le32 ocbb_memory_block_addr_low;
2242 u8 reserved2[4];
2243};
2244
2245I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
2246
2123/* debug commands */ 2247/* debug commands */
2124 2248
2125/* get device id (0xFF00) uses the generic structure */ 2249/* get device id (0xFF00) uses the generic structure */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 3d741ee99a2c..11a9ffebf8d8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -742,6 +742,65 @@ i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
742#endif 742#endif
743 743
744/** 744/**
745 * i40e_read_pba_string - Reads part number string from EEPROM
746 * @hw: pointer to hardware structure
747 * @pba_num: stores the part number string from the EEPROM
748 * @pba_num_size: part number string buffer length
749 *
750 * Reads the part number string from the EEPROM.
751 **/
752i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
753 u32 pba_num_size)
754{
755 i40e_status status = 0;
756 u16 pba_word = 0;
757 u16 pba_size = 0;
758 u16 pba_ptr = 0;
759 u16 i = 0;
760
761 status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
762 if (status || (pba_word != 0xFAFA)) {
763 hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
764 return status;
765 }
766
767 status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
768 if (status) {
769 hw_dbg(hw, "Failed to read PBA Block pointer.\n");
770 return status;
771 }
772
773 status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
774 if (status) {
775 hw_dbg(hw, "Failed to read PBA Block size.\n");
776 return status;
777 }
778
779 /* Subtract one to get PBA word count (PBA Size word is included in
780 * total size)
781 */
782 pba_size--;
783 if (pba_num_size < (((u32)pba_size * 2) + 1)) {
784 hw_dbg(hw, "Buffer to small for PBA data.\n");
785 return I40E_ERR_PARAM;
786 }
787
788 for (i = 0; i < pba_size; i++) {
789 status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
790 if (status) {
791 hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
792 return status;
793 }
794
795 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
796 pba_num[(i * 2) + 1] = pba_word & 0xFF;
797 }
798 pba_num[(pba_size * 2)] = '\0';
799
800 return status;
801}
802
803/**
745 * i40e_get_media_type - Gets media type 804 * i40e_get_media_type - Gets media type
746 * @hw: pointer to the hardware structure 805 * @hw: pointer to the hardware structure
747 **/ 806 **/
@@ -1083,8 +1142,10 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1083 if (mode == I40E_LINK_ACTIVITY) 1142 if (mode == I40E_LINK_ACTIVITY)
1084 blink = false; 1143 blink = false;
1085 1144
1086 gpio_val |= (blink ? 1 : 0) << 1145 if (blink)
1087 I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT; 1146 gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1147 else
1148 gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1088 1149
1089 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); 1150 wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1090 break; 1151 break;
@@ -2035,6 +2096,43 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2035} 2096}
2036 2097
2037/** 2098/**
2099 * i40e_aq_debug_read_register
2100 * @hw: pointer to the hw struct
2101 * @reg_addr: register address
2102 * @reg_val: register value
2103 * @cmd_details: pointer to command details structure or NULL
2104 *
2105 * Read the register using the admin queue commands
2106 **/
2107i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
2108 u32 reg_addr, u64 *reg_val,
2109 struct i40e_asq_cmd_details *cmd_details)
2110{
2111 struct i40e_aq_desc desc;
2112 struct i40e_aqc_debug_reg_read_write *cmd_resp =
2113 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
2114 i40e_status status;
2115
2116 if (reg_val == NULL)
2117 return I40E_ERR_PARAM;
2118
2119 i40e_fill_default_direct_cmd_desc(&desc,
2120 i40e_aqc_opc_debug_read_reg);
2121
2122 cmd_resp->address = cpu_to_le32(reg_addr);
2123
2124 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2125
2126 if (!status) {
2127 *reg_val = ((u64)cmd_resp->value_high << 32) |
2128 (u64)cmd_resp->value_low;
2129 *reg_val = le64_to_cpu(*reg_val);
2130 }
2131
2132 return status;
2133}
2134
2135/**
2038 * i40e_aq_debug_write_register 2136 * i40e_aq_debug_write_register
2039 * @hw: pointer to the hw struct 2137 * @hw: pointer to the hw struct
2040 * @reg_addr: register address 2138 * @reg_addr: register address
@@ -2264,6 +2362,7 @@ i40e_aq_erase_nvm_exit:
2264#define I40E_DEV_FUNC_CAP_VSI 0x17 2362#define I40E_DEV_FUNC_CAP_VSI 0x17
2265#define I40E_DEV_FUNC_CAP_DCB 0x18 2363#define I40E_DEV_FUNC_CAP_DCB 0x18
2266#define I40E_DEV_FUNC_CAP_FCOE 0x21 2364#define I40E_DEV_FUNC_CAP_FCOE 0x21
2365#define I40E_DEV_FUNC_CAP_ISCSI 0x22
2267#define I40E_DEV_FUNC_CAP_RSS 0x40 2366#define I40E_DEV_FUNC_CAP_RSS 0x40
2268#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41 2367#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41
2269#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42 2368#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42
@@ -2292,6 +2391,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
2292 enum i40e_admin_queue_opc list_type_opc) 2391 enum i40e_admin_queue_opc list_type_opc)
2293{ 2392{
2294 struct i40e_aqc_list_capabilities_element_resp *cap; 2393 struct i40e_aqc_list_capabilities_element_resp *cap;
2394 u32 valid_functions, num_functions;
2295 u32 number, logical_id, phys_id; 2395 u32 number, logical_id, phys_id;
2296 struct i40e_hw_capabilities *p; 2396 struct i40e_hw_capabilities *p;
2297 u32 i = 0; 2397 u32 i = 0;
@@ -2362,6 +2462,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
2362 if (number == 1) 2462 if (number == 1)
2363 p->fcoe = true; 2463 p->fcoe = true;
2364 break; 2464 break;
2465 case I40E_DEV_FUNC_CAP_ISCSI:
2466 if (number == 1)
2467 p->iscsi = true;
2468 break;
2365 case I40E_DEV_FUNC_CAP_RSS: 2469 case I40E_DEV_FUNC_CAP_RSS:
2366 p->rss = true; 2470 p->rss = true;
2367 p->rss_table_size = number; 2471 p->rss_table_size = number;
@@ -2427,6 +2531,34 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
2427 if (p->npar_enable || p->mfp_mode_1) 2531 if (p->npar_enable || p->mfp_mode_1)
2428 p->fcoe = false; 2532 p->fcoe = false;
2429 2533
2534 /* count the enabled ports (aka the "not disabled" ports) */
2535 hw->num_ports = 0;
2536 for (i = 0; i < 4; i++) {
2537 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
2538 u64 port_cfg = 0;
2539
2540 /* use AQ read to get the physical register offset instead
2541 * of the port relative offset
2542 */
2543 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
2544 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
2545 hw->num_ports++;
2546 }
2547
2548 valid_functions = p->valid_functions;
2549 num_functions = 0;
2550 while (valid_functions) {
2551 if (valid_functions & 1)
2552 num_functions++;
2553 valid_functions >>= 1;
2554 }
2555
2556 /* partition id is 1-based, and functions are evenly spread
2557 * across the ports as partitions
2558 */
2559 hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
2560 hw->num_partitions = num_functions / hw->num_ports;
2561
2430 /* additional HW specific goodies that might 2562 /* additional HW specific goodies that might
2431 * someday be HW version specific 2563 * someday be HW version specific
2432 */ 2564 */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index cb0de455683e..61236f983971 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1890,7 +1890,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1890 dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n"); 1890 dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1891 dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); 1891 dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1892 dev_info(&pf->pdev->dev, " dump desc aq\n"); 1892 dev_info(&pf->pdev->dev, " dump desc aq\n");
1893 dev_info(&pf->pdev->dev, " dump stats\n");
1894 dev_info(&pf->pdev->dev, " dump reset stats\n"); 1893 dev_info(&pf->pdev->dev, " dump reset stats\n");
1895 dev_info(&pf->pdev->dev, " msg_enable [level]\n"); 1894 dev_info(&pf->pdev->dev, " msg_enable [level]\n");
1896 dev_info(&pf->pdev->dev, " read <reg>\n"); 1895 dev_info(&pf->pdev->dev, " read <reg>\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 951e8767fc50..b8230dc205ec 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -219,6 +219,16 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
219#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) 219#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
220 220
221/** 221/**
222 * i40e_partition_setting_complaint - generic complaint for MFP restriction
223 * @pf: the PF struct
224 **/
225static void i40e_partition_setting_complaint(struct i40e_pf *pf)
226{
227 dev_info(&pf->pdev->dev,
228 "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
229}
230
231/**
222 * i40e_get_settings - Get Link Speed and Duplex settings 232 * i40e_get_settings - Get Link Speed and Duplex settings
223 * @netdev: network interface device structure 233 * @netdev: network interface device structure
224 * @ecmd: ethtool command 234 * @ecmd: ethtool command
@@ -485,6 +495,14 @@ static int i40e_set_settings(struct net_device *netdev,
485 u8 autoneg; 495 u8 autoneg;
486 u32 advertise; 496 u32 advertise;
487 497
498 /* Changing port settings is not supported if this isn't the
499 * port's controlling PF
500 */
501 if (hw->partition_id != 1) {
502 i40e_partition_setting_complaint(pf);
503 return -EOPNOTSUPP;
504 }
505
488 if (vsi != pf->vsi[pf->lan_vsi]) 506 if (vsi != pf->vsi[pf->lan_vsi])
489 return -EOPNOTSUPP; 507 return -EOPNOTSUPP;
490 508
@@ -687,6 +705,14 @@ static int i40e_set_pauseparam(struct net_device *netdev,
687 u8 aq_failures; 705 u8 aq_failures;
688 int err = 0; 706 int err = 0;
689 707
708 /* Changing the port's flow control is not supported if this isn't the
709 * port's controlling PF
710 */
711 if (hw->partition_id != 1) {
712 i40e_partition_setting_complaint(pf);
713 return -EOPNOTSUPP;
714 }
715
690 if (vsi != pf->vsi[pf->lan_vsi]) 716 if (vsi != pf->vsi[pf->lan_vsi])
691 return -EOPNOTSUPP; 717 return -EOPNOTSUPP;
692 718
@@ -1503,7 +1529,7 @@ static void i40e_get_wol(struct net_device *netdev,
1503 1529
1504 /* NVM bit on means WoL disabled for the port */ 1530 /* NVM bit on means WoL disabled for the port */
1505 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); 1531 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
1506 if ((1 << hw->port) & wol_nvm_bits) { 1532 if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
1507 wol->supported = 0; 1533 wol->supported = 0;
1508 wol->wolopts = 0; 1534 wol->wolopts = 0;
1509 } else { 1535 } else {
@@ -1512,13 +1538,28 @@ static void i40e_get_wol(struct net_device *netdev,
1512 } 1538 }
1513} 1539}
1514 1540
1541/**
1542 * i40e_set_wol - set the WakeOnLAN configuration
1543 * @netdev: the netdev in question
1544 * @wol: the ethtool WoL setting data
1545 **/
1515static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1546static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1516{ 1547{
1517 struct i40e_netdev_priv *np = netdev_priv(netdev); 1548 struct i40e_netdev_priv *np = netdev_priv(netdev);
1518 struct i40e_pf *pf = np->vsi->back; 1549 struct i40e_pf *pf = np->vsi->back;
1550 struct i40e_vsi *vsi = np->vsi;
1519 struct i40e_hw *hw = &pf->hw; 1551 struct i40e_hw *hw = &pf->hw;
1520 u16 wol_nvm_bits; 1552 u16 wol_nvm_bits;
1521 1553
1554 /* WoL not supported if this isn't the controlling PF on the port */
1555 if (hw->partition_id != 1) {
1556 i40e_partition_setting_complaint(pf);
1557 return -EOPNOTSUPP;
1558 }
1559
1560 if (vsi != pf->vsi[pf->lan_vsi])
1561 return -EOPNOTSUPP;
1562
1522 /* NVM bit on means WoL disabled for the port */ 1563 /* NVM bit on means WoL disabled for the port */
1523 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); 1564 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
1524 if (((1 << hw->port) & wol_nvm_bits)) 1565 if (((1 << hw->port) & wol_nvm_bits))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index a8b8bd95108d..27c206e62da7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -39,15 +39,6 @@
39#include "i40e_fcoe.h" 39#include "i40e_fcoe.h"
40 40
41/** 41/**
42 * i40e_rx_is_fip - returns true if the rx packet type is FIP
43 * @ptype: the packet type field from rx descriptor write-back
44 **/
45static inline bool i40e_rx_is_fip(u16 ptype)
46{
47 return ptype == I40E_RX_PTYPE_L2_FIP_PAY2;
48}
49
50/**
51 * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE 42 * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
52 * @ptype: the packet type field from rx descriptor write-back 43 * @ptype: the packet type field from rx descriptor write-back
53 **/ 44 **/
@@ -404,6 +395,7 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
404 I40E_AQ_VSI_PROP_INGRESS_UP_VALID | 395 I40E_AQ_VSI_PROP_INGRESS_UP_VALID |
405 I40E_AQ_VSI_PROP_EGRESS_UP_VALID)); 396 I40E_AQ_VSI_PROP_EGRESS_UP_VALID));
406 397
398 info->switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
407 enabled_tc = i40e_get_fcoe_tc_map(pf); 399 enabled_tc = i40e_get_fcoe_tc_map(pf);
408 i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true); 400 i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true);
409 401
@@ -1511,12 +1503,16 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
1511 strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1); 1503 strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1);
1512 netdev->mtu = FCOE_MTU; 1504 netdev->mtu = FCOE_MTU;
1513 SET_NETDEV_DEV(netdev, &pf->pdev->dev); 1505 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
1506 /* set different dev_port value 1 for FCoE netdev than the default
1507 * zero dev_port value for PF netdev, this helps biosdevname user
1508 * tool to differentiate them correctly while both attached to the
1509 * same PCI function.
1510 */
1511 netdev->dev_port = 1;
1514 i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false); 1512 i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
1515 i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false); 1513 i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
1516 i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false); 1514 i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
1517 i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false); 1515 i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
1518 i40e_add_filter(vsi, FIP_ALL_VN2VN_MACS, 0, false, false);
1519 i40e_add_filter(vsi, FIP_ALL_P2P_MACS, 0, false, false);
1520 1516
1521 /* use san mac */ 1517 /* use san mac */
1522 ether_addr_copy(netdev->dev_addr, hw->mac.san_addr); 1518 ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a5f2660d552d..cbe281be1c9f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
39 39
40#define DRV_VERSION_MAJOR 1 40#define DRV_VERSION_MAJOR 1
41#define DRV_VERSION_MINOR 2 41#define DRV_VERSION_MINOR 2
42#define DRV_VERSION_BUILD 2 42#define DRV_VERSION_BUILD 6
43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44 __stringify(DRV_VERSION_MINOR) "." \ 44 __stringify(DRV_VERSION_MINOR) "." \
45 __stringify(DRV_VERSION_BUILD) DRV_KERN 45 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -2819,8 +2819,9 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2819 * i40e_enable_misc_int_causes - enable the non-queue interrupts 2819 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2820 * @hw: ptr to the hardware info 2820 * @hw: ptr to the hardware info
2821 **/ 2821 **/
2822static void i40e_enable_misc_int_causes(struct i40e_hw *hw) 2822static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
2823{ 2823{
2824 struct i40e_hw *hw = &pf->hw;
2824 u32 val; 2825 u32 val;
2825 2826
2826 /* clear things first */ 2827 /* clear things first */
@@ -2832,11 +2833,13 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2832 I40E_PFINT_ICR0_ENA_GRST_MASK | 2833 I40E_PFINT_ICR0_ENA_GRST_MASK |
2833 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | 2834 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2834 I40E_PFINT_ICR0_ENA_GPIO_MASK | 2835 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2835 I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
2836 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | 2836 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2837 I40E_PFINT_ICR0_ENA_VFLR_MASK | 2837 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2838 I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 2838 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2839 2839
2840 if (pf->flags & I40E_FLAG_PTP)
2841 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
2842
2840 wr32(hw, I40E_PFINT_ICR0_ENA, val); 2843 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2841 2844
2842 /* SW_ITR_IDX = 0, but don't change INTENA */ 2845 /* SW_ITR_IDX = 0, but don't change INTENA */
@@ -2866,7 +2869,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2866 q_vector->tx.latency_range = I40E_LOW_LATENCY; 2869 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2867 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); 2870 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2868 2871
2869 i40e_enable_misc_int_causes(hw); 2872 i40e_enable_misc_int_causes(pf);
2870 2873
2871 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 2874 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2872 wr32(hw, I40E_PFINT_LNKLST0, 0); 2875 wr32(hw, I40E_PFINT_LNKLST0, 0);
@@ -2937,7 +2940,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2937/** 2940/**
2938 * i40e_irq_dynamic_disable - Disable default interrupt generation settings 2941 * i40e_irq_dynamic_disable - Disable default interrupt generation settings
2939 * @vsi: pointer to a vsi 2942 * @vsi: pointer to a vsi
2940 * @vector: enable a particular Hw Interrupt vector 2943 * @vector: disable a particular Hw Interrupt vector
2941 **/ 2944 **/
2942void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector) 2945void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
2943{ 2946{
@@ -3402,10 +3405,10 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3402 err = i40e_vsi_request_irq_msix(vsi, basename); 3405 err = i40e_vsi_request_irq_msix(vsi, basename);
3403 else if (pf->flags & I40E_FLAG_MSI_ENABLED) 3406 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3404 err = request_irq(pf->pdev->irq, i40e_intr, 0, 3407 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3405 pf->misc_int_name, pf); 3408 pf->int_name, pf);
3406 else 3409 else
3407 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, 3410 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3408 pf->misc_int_name, pf); 3411 pf->int_name, pf);
3409 3412
3410 if (err) 3413 if (err)
3411 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); 3414 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
@@ -3999,6 +4002,35 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
3999 4002
4000#endif 4003#endif
4001/** 4004/**
4005 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4006 * @pf: pointer to pf
4007 *
4008 * Get TC map for ISCSI PF type that will include iSCSI TC
4009 * and LAN TC.
4010 **/
4011static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4012{
4013 struct i40e_dcb_app_priority_table app;
4014 struct i40e_hw *hw = &pf->hw;
4015 u8 enabled_tc = 1; /* TC0 is always enabled */
4016 u8 tc, i;
4017 /* Get the iSCSI APP TLV */
4018 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4019
4020 for (i = 0; i < dcbcfg->numapps; i++) {
4021 app = dcbcfg->app[i];
4022 if (app.selector == I40E_APP_SEL_TCPIP &&
4023 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4024 tc = dcbcfg->etscfg.prioritytable[app.priority];
4025 enabled_tc |= (1 << tc);
4026 break;
4027 }
4028 }
4029
4030 return enabled_tc;
4031}
4032
4033/**
4002 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config 4034 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4003 * @dcbcfg: the corresponding DCBx configuration structure 4035 * @dcbcfg: the corresponding DCBx configuration structure
4004 * 4036 *
@@ -4061,18 +4093,23 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4061 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 4093 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4062 return 1; 4094 return 1;
4063 4095
4096 /* SFP mode will be enabled for all TCs on port */
4097 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4098 return i40e_dcb_get_num_tc(dcbcfg);
4099
4064 /* MFP mode return count of enabled TCs for this PF */ 4100 /* MFP mode return count of enabled TCs for this PF */
4065 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 4101 if (pf->hw.func_caps.iscsi)
4102 enabled_tc = i40e_get_iscsi_tc_map(pf);
4103 else
4066 enabled_tc = pf->hw.func_caps.enabled_tcmap; 4104 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4067 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4068 if (enabled_tc & (1 << i))
4069 num_tc++;
4070 }
4071 return num_tc;
4072 }
4073 4105
4074 /* SFP mode will be enabled for all TCs on port */ 4106 /* At least have TC0 */
4075 return i40e_dcb_get_num_tc(dcbcfg); 4107 enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4108 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4109 if (enabled_tc & (1 << i))
4110 num_tc++;
4111 }
4112 return num_tc;
4076} 4113}
4077 4114
4078/** 4115/**
@@ -4110,12 +4147,15 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4110 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 4147 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4111 return i40e_pf_get_default_tc(pf); 4148 return i40e_pf_get_default_tc(pf);
4112 4149
4113 /* MFP mode will have enabled TCs set by FW */
4114 if (pf->flags & I40E_FLAG_MFP_ENABLED)
4115 return pf->hw.func_caps.enabled_tcmap;
4116
4117 /* SFP mode we want PF to be enabled for all TCs */ 4150 /* SFP mode we want PF to be enabled for all TCs */
4118 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); 4151 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4152 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4153
4154 /* MPF enabled and iSCSI PF type */
4155 if (pf->hw.func_caps.iscsi)
4156 return i40e_get_iscsi_tc_map(pf);
4157 else
4158 return pf->hw.func_caps.enabled_tcmap;
4119} 4159}
4120 4160
4121/** 4161/**
@@ -4505,9 +4545,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
4505 struct i40e_hw *hw = &pf->hw; 4545 struct i40e_hw *hw = &pf->hw;
4506 int err = 0; 4546 int err = 0;
4507 4547
4508 if (pf->hw.func_caps.npar_enable)
4509 goto out;
4510
4511 /* Get the initial DCB configuration */ 4548 /* Get the initial DCB configuration */
4512 err = i40e_init_dcb(hw); 4549 err = i40e_init_dcb(hw);
4513 if (!err) { 4550 if (!err) {
@@ -4533,7 +4570,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
4533 "DCBX offload is supported for this PF.\n"); 4570 "DCBX offload is supported for this PF.\n");
4534 } 4571 }
4535 } else { 4572 } else {
4536 dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n", 4573 dev_info(&pf->pdev->dev,
4574 "AQ Querying DCB configuration failed: aq_err %d\n",
4537 pf->hw.aq.asq_last_status); 4575 pf->hw.aq.asq_last_status);
4538 } 4576 }
4539 4577
@@ -4557,6 +4595,15 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
4557 return; 4595 return;
4558 } 4596 }
4559 4597
4598 /* Warn user if link speed on NPAR enabled partition is not at
4599 * least 10GB
4600 */
4601 if (vsi->back->hw.func_caps.npar_enable &&
4602 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
4603 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
4604 netdev_warn(vsi->netdev,
4605 "The partition detected link speed that is less than 10Gbps\n");
4606
4560 switch (vsi->back->hw.phy.link_info.link_speed) { 4607 switch (vsi->back->hw.phy.link_info.link_speed) {
4561 case I40E_LINK_SPEED_40GB: 4608 case I40E_LINK_SPEED_40GB:
4562 strlcpy(speed, "40 Gbps", SPEED_SIZE); 4609 strlcpy(speed, "40 Gbps", SPEED_SIZE);
@@ -4836,7 +4883,7 @@ static int i40e_open(struct net_device *netdev)
4836int i40e_vsi_open(struct i40e_vsi *vsi) 4883int i40e_vsi_open(struct i40e_vsi *vsi)
4837{ 4884{
4838 struct i40e_pf *pf = vsi->back; 4885 struct i40e_pf *pf = vsi->back;
4839 char int_name[IFNAMSIZ]; 4886 char int_name[I40E_INT_NAME_STR_LEN];
4840 int err; 4887 int err;
4841 4888
4842 /* allocate descriptors */ 4889 /* allocate descriptors */
@@ -4870,7 +4917,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
4870 goto err_set_queues; 4917 goto err_set_queues;
4871 4918
4872 } else if (vsi->type == I40E_VSI_FDIR) { 4919 } else if (vsi->type == I40E_VSI_FDIR) {
4873 snprintf(int_name, sizeof(int_name) - 1, "%s-%s-fdir", 4920 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
4874 dev_driver_string(&pf->pdev->dev), 4921 dev_driver_string(&pf->pdev->dev),
4875 dev_name(&pf->pdev->dev)); 4922 dev_name(&pf->pdev->dev));
4876 err = i40e_vsi_request_irq(vsi, int_name); 4923 err = i40e_vsi_request_irq(vsi, int_name);
@@ -5494,14 +5541,18 @@ static void i40e_link_event(struct i40e_pf *pf)
5494{ 5541{
5495 bool new_link, old_link; 5542 bool new_link, old_link;
5496 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; 5543 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5544 u8 new_link_speed, old_link_speed;
5497 5545
5498 /* set this to force the get_link_status call to refresh state */ 5546 /* set this to force the get_link_status call to refresh state */
5499 pf->hw.phy.get_link_info = true; 5547 pf->hw.phy.get_link_info = true;
5500 5548
5501 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); 5549 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
5502 new_link = i40e_get_link_status(&pf->hw); 5550 new_link = i40e_get_link_status(&pf->hw);
5551 old_link_speed = pf->hw.phy.link_info_old.link_speed;
5552 new_link_speed = pf->hw.phy.link_info.link_speed;
5503 5553
5504 if (new_link == old_link && 5554 if (new_link == old_link &&
5555 new_link_speed == old_link_speed &&
5505 (test_bit(__I40E_DOWN, &vsi->state) || 5556 (test_bit(__I40E_DOWN, &vsi->state) ||
5506 new_link == netif_carrier_ok(vsi->netdev))) 5557 new_link == netif_carrier_ok(vsi->netdev)))
5507 return; 5558 return;
@@ -6175,8 +6226,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6175#ifdef CONFIG_I40E_DCB 6226#ifdef CONFIG_I40E_DCB
6176 ret = i40e_init_pf_dcb(pf); 6227 ret = i40e_init_pf_dcb(pf);
6177 if (ret) { 6228 if (ret) {
6178 dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret); 6229 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6179 goto end_core_reset; 6230 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6231 /* Continue without DCB enabled */
6180 } 6232 }
6181#endif /* CONFIG_I40E_DCB */ 6233#endif /* CONFIG_I40E_DCB */
6182#ifdef I40E_FCOE 6234#ifdef I40E_FCOE
@@ -6881,17 +6933,17 @@ static int i40e_init_msix(struct i40e_pf *pf)
6881 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) 6933 if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
6882 other_vecs++; 6934 other_vecs++;
6883 6935
6936 /* Scale down if necessary, and the rings will share vectors */
6937 pf->num_lan_msix = min_t(int, pf->num_lan_msix,
6938 (hw->func_caps.num_msix_vectors - other_vecs));
6939 v_budget = pf->num_lan_msix + other_vecs;
6940
6884#ifdef I40E_FCOE 6941#ifdef I40E_FCOE
6885 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { 6942 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
6886 pf->num_fcoe_msix = pf->num_fcoe_qps; 6943 pf->num_fcoe_msix = pf->num_fcoe_qps;
6887 v_budget += pf->num_fcoe_msix; 6944 v_budget += pf->num_fcoe_msix;
6888 } 6945 }
6889
6890#endif 6946#endif
6891 /* Scale down if necessary, and the rings will share vectors */
6892 pf->num_lan_msix = min_t(int, pf->num_lan_msix,
6893 (hw->func_caps.num_msix_vectors - other_vecs));
6894 v_budget = pf->num_lan_msix + other_vecs;
6895 6947
6896 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), 6948 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
6897 GFP_KERNEL); 6949 GFP_KERNEL);
@@ -7113,16 +7165,16 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
7113 */ 7165 */
7114 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { 7166 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7115 err = request_irq(pf->msix_entries[0].vector, 7167 err = request_irq(pf->msix_entries[0].vector,
7116 i40e_intr, 0, pf->misc_int_name, pf); 7168 i40e_intr, 0, pf->int_name, pf);
7117 if (err) { 7169 if (err) {
7118 dev_info(&pf->pdev->dev, 7170 dev_info(&pf->pdev->dev,
7119 "request_irq for %s failed: %d\n", 7171 "request_irq for %s failed: %d\n",
7120 pf->misc_int_name, err); 7172 pf->int_name, err);
7121 return -EFAULT; 7173 return -EFAULT;
7122 } 7174 }
7123 } 7175 }
7124 7176
7125 i40e_enable_misc_int_causes(hw); 7177 i40e_enable_misc_int_causes(pf);
7126 7178
7127 /* associate no queues to the misc vector */ 7179 /* associate no queues to the misc vector */
7128 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); 7180 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
@@ -7306,7 +7358,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
7306 7358
7307#endif /* I40E_FCOE */ 7359#endif /* I40E_FCOE */
7308#ifdef CONFIG_PCI_IOV 7360#ifdef CONFIG_PCI_IOV
7309 if (pf->hw.func_caps.num_vfs) { 7361 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
7310 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; 7362 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
7311 pf->flags |= I40E_FLAG_SRIOV_ENABLED; 7363 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
7312 pf->num_req_vfs = min_t(int, 7364 pf->num_req_vfs = min_t(int,
@@ -7766,7 +7818,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
7766 enabled_tc = i40e_pf_get_tc_map(pf); 7818 enabled_tc = i40e_pf_get_tc_map(pf);
7767 7819
7768 /* MFP mode setup queue map and update VSI */ 7820 /* MFP mode setup queue map and update VSI */
7769 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 7821 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
7822 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
7770 memset(&ctxt, 0, sizeof(ctxt)); 7823 memset(&ctxt, 0, sizeof(ctxt));
7771 ctxt.seid = pf->main_vsi_seid; 7824 ctxt.seid = pf->main_vsi_seid;
7772 ctxt.pf_num = pf->hw.pf_id; 7825 ctxt.pf_num = pf->hw.pf_id;
@@ -7787,6 +7840,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
7787 /* Default/Main VSI is only enabled for TC0 7840 /* Default/Main VSI is only enabled for TC0
7788 * reconfigure it to enable all TCs that are 7841 * reconfigure it to enable all TCs that are
7789 * available on the port in SFP mode. 7842 * available on the port in SFP mode.
7843 * For MFP case the iSCSI PF would use this
7844 * flow to enable LAN+iSCSI TC.
7790 */ 7845 */
7791 ret = i40e_vsi_config_tc(vsi, enabled_tc); 7846 ret = i40e_vsi_config_tc(vsi, enabled_tc);
7792 if (ret) { 7847 if (ret) {
@@ -9164,7 +9219,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9164 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; 9219 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9165 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; 9220 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
9166 9221
9167 snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1, 9222 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
9168 "%s-%s:misc", 9223 "%s-%s:misc",
9169 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); 9224 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
9170 9225
@@ -9227,6 +9282,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9227 goto err_configure_lan_hmc; 9282 goto err_configure_lan_hmc;
9228 } 9283 }
9229 9284
9285 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
9286 * Ignore error return codes because if it was already disabled via
9287 * hardware settings this will fail
9288 */
9289 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
9290 (pf->hw.aq.fw_maj_ver < 4)) {
9291 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
9292 i40e_aq_stop_lldp(hw, true, NULL);
9293 }
9294
9230 i40e_get_mac_addr(hw, hw->mac.addr); 9295 i40e_get_mac_addr(hw, hw->mac.addr);
9231 if (!is_valid_ether_addr(hw->mac.addr)) { 9296 if (!is_valid_ether_addr(hw->mac.addr)) {
9232 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); 9297 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
@@ -9256,7 +9321,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9256#ifdef CONFIG_I40E_DCB 9321#ifdef CONFIG_I40E_DCB
9257 err = i40e_init_pf_dcb(pf); 9322 err = i40e_init_pf_dcb(pf);
9258 if (err) { 9323 if (err) {
9259 dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err); 9324 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
9260 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; 9325 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9261 /* Continue without DCB enabled */ 9326 /* Continue without DCB enabled */
9262 } 9327 }
@@ -9671,6 +9736,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
9671 9736
9672 set_bit(__I40E_SUSPENDED, &pf->state); 9737 set_bit(__I40E_SUSPENDED, &pf->state);
9673 set_bit(__I40E_DOWN, &pf->state); 9738 set_bit(__I40E_DOWN, &pf->state);
9739 del_timer_sync(&pf->service_timer);
9740 cancel_work_sync(&pf->service_task);
9674 rtnl_lock(); 9741 rtnl_lock();
9675 i40e_prep_for_reset(pf); 9742 i40e_prep_for_reset(pf);
9676 rtnl_unlock(); 9743 rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 2fb4306597e8..68e852a96680 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -71,6 +71,9 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
71i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, 71i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
72 u32 reg_addr, u64 reg_val, 72 u32 reg_addr, u64 reg_val,
73 struct i40e_asq_cmd_details *cmd_details); 73 struct i40e_asq_cmd_details *cmd_details);
74i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
75 u32 reg_addr, u64 *reg_val,
76 struct i40e_asq_cmd_details *cmd_details);
74i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, 77i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
75 struct i40e_asq_cmd_details *cmd_details); 78 struct i40e_asq_cmd_details *cmd_details);
76i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, 79i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
@@ -245,6 +248,8 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw);
245bool i40e_get_link_status(struct i40e_hw *hw); 248bool i40e_get_link_status(struct i40e_hw *hw);
246i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); 249i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
247i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); 250i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
251i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
252 u32 pba_num_size);
248i40e_status i40e_validate_mac_addr(u8 *mac_addr); 253i40e_status i40e_validate_mac_addr(u8 *mac_addr);
249void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); 254void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
250#ifdef I40E_FCOE 255#ifdef I40E_FCOE
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 6d1ec926aa37..fabcfa1b45b2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -247,7 +247,12 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
247 u32 prttsyn_stat; 247 u32 prttsyn_stat;
248 int n; 248 int n;
249 249
250 if (!(pf->flags & I40E_FLAG_PTP)) 250 /* Since we cannot turn off the Rx timestamp logic if the device is
251 * configured for Tx timestamping, we check if Rx timestamping is
252 * configured. We don't want to spuriously warn about Rx timestamp
253 * hangs if we don't care about the timestamps.
254 */
255 if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
251 return; 256 return;
252 257
253 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); 258 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
@@ -305,6 +310,13 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
305 u32 hi, lo; 310 u32 hi, lo;
306 u64 ns; 311 u64 ns;
307 312
313 if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
314 return;
315
316 /* don't attempt to timestamp if we don't have an skb */
317 if (!pf->ptp_tx_skb)
318 return;
319
308 lo = rd32(hw, I40E_PRTTSYN_TXTIME_L); 320 lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
309 hi = rd32(hw, I40E_PRTTSYN_TXTIME_H); 321 hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
310 322
@@ -338,7 +350,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
338 /* Since we cannot turn off the Rx timestamp logic if the device is 350 /* Since we cannot turn off the Rx timestamp logic if the device is
339 * doing Tx timestamping, check if Rx timestamping is configured. 351 * doing Tx timestamping, check if Rx timestamping is configured.
340 */ 352 */
341 if (!pf->ptp_rx) 353 if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
342 return; 354 return;
343 355
344 hw = &pf->hw; 356 hw = &pf->hw;
@@ -467,7 +479,12 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
467 switch (config->rx_filter) { 479 switch (config->rx_filter) {
468 case HWTSTAMP_FILTER_NONE: 480 case HWTSTAMP_FILTER_NONE:
469 pf->ptp_rx = false; 481 pf->ptp_rx = false;
470 tsyntype = 0; 482 /* We set the type to V1, but do not enable UDP packet
483 * recognition. In this way, we should be as close to
484 * disabling PTP Rx timestamps as possible since V1 packets
485 * are always UDP, since L2 packets are a V2 feature.
486 */
487 tsyntype = I40E_PRTTSYN_CTL1_TSYNTYPE_V1;
471 break; 488 break;
472 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 489 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
473 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 490 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
@@ -521,17 +538,18 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
521 regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 538 regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
522 wr32(hw, I40E_PFINT_ICR0_ENA, regval); 539 wr32(hw, I40E_PFINT_ICR0_ENA, regval);
523 540
524 /* There is no simple on/off switch for Rx. To "disable" Rx support, 541 /* Although there is no simple on/off switch for Rx, we "disable" Rx
525 * ignore any received timestamps, rather than turn off the clock. 542 * timestamps by setting to V1 only mode and clear the UDP
543 * recognition. This ought to disable all PTP Rx timestamps as V1
544 * packets are always over UDP. Note that software is configured to
545 * ignore Rx timestamps via the pf->ptp_rx flag.
526 */ 546 */
527 if (pf->ptp_rx) { 547 regval = rd32(hw, I40E_PRTTSYN_CTL1);
528 regval = rd32(hw, I40E_PRTTSYN_CTL1); 548 /* clear everything but the enable bit */
529 /* clear everything but the enable bit */ 549 regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
530 regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK; 550 /* now enable bits for desired Rx timestamps */
531 /* now enable bits for desired Rx timestamps */ 551 regval |= tsyntype;
532 regval |= tsyntype; 552 wr32(hw, I40E_PRTTSYN_CTL1, regval);
533 wr32(hw, I40E_PRTTSYN_CTL1, regval);
534 }
535 553
536 return 0; 554 return 0;
537} 555}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index cecb340898fe..2206d2d36f0f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -836,8 +836,8 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
836{ 836{
837 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 837 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
838 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | 838 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
839 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK 839 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
840 /* allow 00 to be written to the index */; 840 /* allow 00 to be written to the index */
841 841
842 wr32(&vsi->back->hw, 842 wr32(&vsi->back->hw,
843 I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), 843 I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
@@ -1098,6 +1098,8 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1098 if (!rx_ring->rx_bi) 1098 if (!rx_ring->rx_bi)
1099 goto err; 1099 goto err;
1100 1100
1101 u64_stats_init(&rx_ring->syncp);
1102
1101 /* Round up to nearest 4K */ 1103 /* Round up to nearest 4K */
1102 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) 1104 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
1103 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) 1105 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
@@ -1815,8 +1817,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1815 u32 tx_flags = 0; 1817 u32 tx_flags = 0;
1816 1818
1817 /* if we have a HW VLAN tag being added, default to the HW one */ 1819 /* if we have a HW VLAN tag being added, default to the HW one */
1818 if (vlan_tx_tag_present(skb)) { 1820 if (skb_vlan_tag_present(skb)) {
1819 tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; 1821 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
1820 tx_flags |= I40E_TX_FLAGS_HW_VLAN; 1822 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
1821 /* else if it is a SW VLAN, check the next protocol and store the tag */ 1823 /* else if it is a SW VLAN, check the next protocol and store the tag */
1822 } else if (protocol == htons(ETH_P_8021Q)) { 1824 } else if (protocol == htons(ETH_P_8021Q)) {
@@ -1939,6 +1941,9 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
1939 * we are not already transmitting a packet to be timestamped 1941 * we are not already transmitting a packet to be timestamped
1940 */ 1942 */
1941 pf = i40e_netdev_to_pf(tx_ring->netdev); 1943 pf = i40e_netdev_to_pf(tx_ring->netdev);
1944 if (!(pf->flags & I40E_FLAG_PTP))
1945 return 0;
1946
1942 if (pf->ptp_tx && 1947 if (pf->ptp_tx &&
1943 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) { 1948 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
1944 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1949 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index c1f2eb963357..e9901ef06a63 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -211,6 +211,7 @@ struct i40e_hw_capabilities {
211 bool evb_802_1_qbh; /* Bridge Port Extension */ 211 bool evb_802_1_qbh; /* Bridge Port Extension */
212 bool dcb; 212 bool dcb;
213 bool fcoe; 213 bool fcoe;
214 bool iscsi; /* Indicates iSCSI enabled */
214 bool mfp_mode_1; 215 bool mfp_mode_1;
215 bool mgmt_cem; 216 bool mgmt_cem;
216 bool ieee_1588; 217 bool ieee_1588;
@@ -431,7 +432,7 @@ struct i40e_hw {
431 u8 __iomem *hw_addr; 432 u8 __iomem *hw_addr;
432 void *back; 433 void *back;
433 434
434 /* function pointer structs */ 435 /* subsystem structs */
435 struct i40e_phy_info phy; 436 struct i40e_phy_info phy;
436 struct i40e_mac_info mac; 437 struct i40e_mac_info mac;
437 struct i40e_bus_info bus; 438 struct i40e_bus_info bus;
@@ -458,6 +459,11 @@ struct i40e_hw {
458 u8 pf_id; 459 u8 pf_id;
459 u16 main_vsi_seid; 460 u16 main_vsi_seid;
460 461
462 /* for multi-function MACs */
463 u16 partition_id;
464 u16 num_partitions;
465 u16 num_ports;
466
461 /* Closest numa node to the device */ 467 /* Closest numa node to the device */
462 u16 numa_node; 468 u16 numa_node;
463 469
@@ -1135,6 +1141,8 @@ struct i40e_hw_port_stats {
1135/* Checksum and Shadow RAM pointers */ 1141/* Checksum and Shadow RAM pointers */
1136#define I40E_SR_NVM_CONTROL_WORD 0x00 1142#define I40E_SR_NVM_CONTROL_WORD 0x00
1137#define I40E_SR_EMP_MODULE_PTR 0x0F 1143#define I40E_SR_EMP_MODULE_PTR 0x0F
1144#define I40E_SR_PBA_FLAGS 0x15
1145#define I40E_SR_PBA_BLOCK_PTR 0x16
1138#define I40E_SR_NVM_IMAGE_VERSION 0x18 1146#define I40E_SR_NVM_IMAGE_VERSION 0x18
1139#define I40E_SR_NVM_WAKE_ON_LAN 0x19 1147#define I40E_SR_NVM_WAKE_ON_LAN 0x19
1140#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 1148#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 5bae89550657..40f042af4131 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -647,6 +647,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
647 int i; 647 int i;
648 u32 reg; 648 u32 reg;
649 649
650 if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
651 return;
652
650 /* warn the VF */ 653 /* warn the VF */
651 clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); 654 clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
652 655
@@ -668,13 +671,13 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
668 /* poll VPGEN_VFRSTAT reg to make sure 671 /* poll VPGEN_VFRSTAT reg to make sure
669 * that reset is complete 672 * that reset is complete
670 */ 673 */
671 for (i = 0; i < 100; i++) { 674 for (i = 0; i < 10; i++) {
672 /* vf reset requires driver to first reset the 675 /* VF reset requires driver to first reset the VF and then
673 * vf and then poll the status register to make sure 676 * poll the status register to make sure that the reset
674 * that the requested op was completed 677 * completed successfully. Due to internal HW FIFO flushes,
675 * successfully 678 * we must wait 10ms before the register will be valid.
676 */ 679 */
677 usleep_range(10, 20); 680 usleep_range(10000, 20000);
678 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); 681 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
679 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { 682 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
680 rsd = true; 683 rsd = true;
@@ -706,6 +709,7 @@ complete_reset:
706 /* tell the VF the reset is done */ 709 /* tell the VF the reset is done */
707 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); 710 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
708 i40e_flush(hw); 711 i40e_flush(hw);
712 clear_bit(__I40E_VF_DISABLE, &pf->state);
709} 713}
710 714
711/** 715/**
@@ -790,11 +794,18 @@ void i40e_free_vfs(struct i40e_pf *pf)
790 794
791 if (!pf->vf) 795 if (!pf->vf)
792 return; 796 return;
797 while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
798 usleep_range(1000, 2000);
793 799
794 /* Disable interrupt 0 so we don't try to handle the VFLR. */ 800 /* Disable IOV before freeing resources. This lets any VF drivers
795 i40e_irq_dynamic_disable_icr0(pf); 801 * running in the host get themselves cleaned up before we yank
802 * the carpet out from underneath their feet.
803 */
804 if (!pci_vfs_assigned(pf->pdev))
805 pci_disable_sriov(pf->pdev);
806
807 msleep(20); /* let any messages in transit get finished up */
796 808
797 mdelay(10); /* let any messages in transit get finished up */
798 /* free up vf resources */ 809 /* free up vf resources */
799 tmp = pf->num_alloc_vfs; 810 tmp = pf->num_alloc_vfs;
800 pf->num_alloc_vfs = 0; 811 pf->num_alloc_vfs = 0;
@@ -813,7 +824,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
813 * before this function ever gets called. 824 * before this function ever gets called.
814 */ 825 */
815 if (!pci_vfs_assigned(pf->pdev)) { 826 if (!pci_vfs_assigned(pf->pdev)) {
816 pci_disable_sriov(pf->pdev);
817 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to 827 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
818 * work correctly when SR-IOV gets re-enabled. 828 * work correctly when SR-IOV gets re-enabled.
819 */ 829 */
@@ -827,9 +837,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
827 dev_warn(&pf->pdev->dev, 837 dev_warn(&pf->pdev->dev,
828 "unable to disable SR-IOV because VFs are assigned.\n"); 838 "unable to disable SR-IOV because VFs are assigned.\n");
829 } 839 }
830 840 clear_bit(__I40E_VF_DISABLE, &pf->state);
831 /* Re-enable interrupt 0. */
832 i40e_irq_dynamic_enable_icr0(pf);
833} 841}
834 842
835#ifdef CONFIG_PCI_IOV 843#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 6c31bf22c2c3..60f04e96a80e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -148,7 +148,7 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
148 148
149/* general information */ 149/* general information */
150#define I40E_AQ_LARGE_BUF 512 150#define I40E_AQ_LARGE_BUF 512
151#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */ 151#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
152 152
153void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, 153void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
154 u16 opcode); 154 u16 opcode);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index ff1b16370da9..e715bccfb5d2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -268,6 +268,8 @@ enum i40e_admin_queue_opc {
268 /* OEM commands */ 268 /* OEM commands */
269 i40e_aqc_opc_oem_parameter_change = 0xFE00, 269 i40e_aqc_opc_oem_parameter_change = 0xFE00,
270 i40e_aqc_opc_oem_device_status_change = 0xFE01, 270 i40e_aqc_opc_oem_device_status_change = 0xFE01,
271 i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
272 i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
271 273
272 /* debug commands */ 274 /* debug commands */
273 i40e_aqc_opc_debug_get_deviceid = 0xFF00, 275 i40e_aqc_opc_debug_get_deviceid = 0xFF00,
@@ -276,7 +278,6 @@ enum i40e_admin_queue_opc {
276 i40e_aqc_opc_debug_write_reg = 0xFF04, 278 i40e_aqc_opc_debug_write_reg = 0xFF04,
277 i40e_aqc_opc_debug_modify_reg = 0xFF07, 279 i40e_aqc_opc_debug_modify_reg = 0xFF07,
278 i40e_aqc_opc_debug_dump_internals = 0xFF08, 280 i40e_aqc_opc_debug_dump_internals = 0xFF08,
279 i40e_aqc_opc_debug_modify_internals = 0xFF09,
280}; 281};
281 282
282/* command structures and indirect data structures */ 283/* command structures and indirect data structures */
@@ -410,6 +411,7 @@ struct i40e_aqc_list_capabilities_element_resp {
410#define I40E_AQ_CAP_ID_VSI 0x0017 411#define I40E_AQ_CAP_ID_VSI 0x0017
411#define I40E_AQ_CAP_ID_DCB 0x0018 412#define I40E_AQ_CAP_ID_DCB 0x0018
412#define I40E_AQ_CAP_ID_FCOE 0x0021 413#define I40E_AQ_CAP_ID_FCOE 0x0021
414#define I40E_AQ_CAP_ID_ISCSI 0x0022
413#define I40E_AQ_CAP_ID_RSS 0x0040 415#define I40E_AQ_CAP_ID_RSS 0x0040
414#define I40E_AQ_CAP_ID_RXQ 0x0041 416#define I40E_AQ_CAP_ID_RXQ 0x0041
415#define I40E_AQ_CAP_ID_TXQ 0x0042 417#define I40E_AQ_CAP_ID_TXQ 0x0042
@@ -454,8 +456,11 @@ struct i40e_aqc_arp_proxy_data {
454 __le32 pfpm_proxyfc; 456 __le32 pfpm_proxyfc;
455 __le32 ip_addr; 457 __le32 ip_addr;
456 u8 mac_addr[6]; 458 u8 mac_addr[6];
459 u8 reserved[2];
457}; 460};
458 461
462I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
463
459/* Set NS Proxy Table Entry Command (indirect 0x0105) */ 464/* Set NS Proxy Table Entry Command (indirect 0x0105) */
460struct i40e_aqc_ns_proxy_data { 465struct i40e_aqc_ns_proxy_data {
461 __le16 table_idx_mac_addr_0; 466 __le16 table_idx_mac_addr_0;
@@ -481,6 +486,8 @@ struct i40e_aqc_ns_proxy_data {
481 u8 ipv6_addr_1[16]; 486 u8 ipv6_addr_1[16];
482}; 487};
483 488
489I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
490
484/* Manage LAA Command (0x0106) - obsolete */ 491/* Manage LAA Command (0x0106) - obsolete */
485struct i40e_aqc_mng_laa { 492struct i40e_aqc_mng_laa {
486 __le16 command_flags; 493 __le16 command_flags;
@@ -491,6 +498,8 @@ struct i40e_aqc_mng_laa {
491 u8 reserved2[6]; 498 u8 reserved2[6];
492}; 499};
493 500
501I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
502
494/* Manage MAC Address Read Command (indirect 0x0107) */ 503/* Manage MAC Address Read Command (indirect 0x0107) */
495struct i40e_aqc_mac_address_read { 504struct i40e_aqc_mac_address_read {
496 __le16 command_flags; 505 __le16 command_flags;
@@ -562,6 +571,8 @@ struct i40e_aqc_get_switch_config_header_resp {
562 u8 reserved[12]; 571 u8 reserved[12];
563}; 572};
564 573
574I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
575
565struct i40e_aqc_switch_config_element_resp { 576struct i40e_aqc_switch_config_element_resp {
566 u8 element_type; 577 u8 element_type;
567#define I40E_AQ_SW_ELEM_TYPE_MAC 1 578#define I40E_AQ_SW_ELEM_TYPE_MAC 1
@@ -587,6 +598,8 @@ struct i40e_aqc_switch_config_element_resp {
587 __le16 element_info; 598 __le16 element_info;
588}; 599};
589 600
601I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
602
590/* Get Switch Configuration (indirect 0x0200) 603/* Get Switch Configuration (indirect 0x0200)
591 * an array of elements are returned in the response buffer 604 * an array of elements are returned in the response buffer
592 * the first in the array is the header, remainder are elements 605 * the first in the array is the header, remainder are elements
@@ -596,6 +609,8 @@ struct i40e_aqc_get_switch_config_resp {
596 struct i40e_aqc_switch_config_element_resp element[1]; 609 struct i40e_aqc_switch_config_element_resp element[1];
597}; 610};
598 611
612I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
613
599/* Add Statistics (direct 0x0201) 614/* Add Statistics (direct 0x0201)
600 * Remove Statistics (direct 0x0202) 615 * Remove Statistics (direct 0x0202)
601 */ 616 */
@@ -661,6 +676,8 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
661 u8 reserved2[6]; 676 u8 reserved2[6];
662}; 677};
663 678
679I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
680
664/* Add VSI (indirect 0x0210) 681/* Add VSI (indirect 0x0210)
665 * this indirect command uses struct i40e_aqc_vsi_properties_data 682 * this indirect command uses struct i40e_aqc_vsi_properties_data
666 * as the indirect buffer (128 bytes) 683 * as the indirect buffer (128 bytes)
@@ -1092,6 +1109,8 @@ struct i40e_aqc_remove_tag {
1092 u8 reserved[12]; 1109 u8 reserved[12];
1093}; 1110};
1094 1111
1112I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
1113
1095/* Add multicast E-Tag (direct 0x0257) 1114/* Add multicast E-Tag (direct 0x0257)
1096 * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields 1115 * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
1097 * and no external data 1116 * and no external data
@@ -1207,7 +1226,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
1207 } ipaddr; 1226 } ipaddr;
1208 __le16 flags; 1227 __le16 flags;
1209#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 1228#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
1210#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ 1229#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
1211 I40E_AQC_ADD_CLOUD_FILTER_SHIFT) 1230 I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
1212/* 0x0000 reserved */ 1231/* 0x0000 reserved */
1213#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 1232#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
@@ -1240,7 +1259,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
1240 u8 reserved[4]; 1259 u8 reserved[4];
1241 __le16 queue_number; 1260 __le16 queue_number;
1242#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 1261#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
1243#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ 1262#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
1244 I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) 1263 I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
1245 u8 reserved2[14]; 1264 u8 reserved2[14];
1246 /* response section */ 1265 /* response section */
@@ -1359,6 +1378,8 @@ struct i40e_aqc_configure_vsi_ets_sla_bw_data {
1359 u8 reserved1[28]; 1378 u8 reserved1[28];
1360}; 1379};
1361 1380
1381I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
1382
1362/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) 1383/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
1363 * responds with i40e_aqc_qs_handles_resp 1384 * responds with i40e_aqc_qs_handles_resp
1364 */ 1385 */
@@ -1370,6 +1391,8 @@ struct i40e_aqc_configure_vsi_tc_bw_data {
1370 __le16 qs_handles[8]; 1391 __le16 qs_handles[8];
1371}; 1392};
1372 1393
1394I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
1395
1373/* Query vsi bw configuration (indirect 0x0408) */ 1396/* Query vsi bw configuration (indirect 0x0408) */
1374struct i40e_aqc_query_vsi_bw_config_resp { 1397struct i40e_aqc_query_vsi_bw_config_resp {
1375 u8 tc_valid_bits; 1398 u8 tc_valid_bits;
@@ -1383,6 +1406,8 @@ struct i40e_aqc_query_vsi_bw_config_resp {
1383 u8 reserved3[23]; 1406 u8 reserved3[23];
1384}; 1407};
1385 1408
1409I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
1410
1386/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ 1411/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
1387struct i40e_aqc_query_vsi_ets_sla_config_resp { 1412struct i40e_aqc_query_vsi_ets_sla_config_resp {
1388 u8 tc_valid_bits; 1413 u8 tc_valid_bits;
@@ -1394,6 +1419,8 @@ struct i40e_aqc_query_vsi_ets_sla_config_resp {
1394 __le16 tc_bw_max[2]; 1419 __le16 tc_bw_max[2];
1395}; 1420};
1396 1421
1422I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
1423
1397/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ 1424/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
1398struct i40e_aqc_configure_switching_comp_bw_limit { 1425struct i40e_aqc_configure_switching_comp_bw_limit {
1399 __le16 seid; 1426 __le16 seid;
@@ -1421,6 +1448,8 @@ struct i40e_aqc_configure_switching_comp_ets_data {
1421 u8 reserved2[96]; 1448 u8 reserved2[96];
1422}; 1449};
1423 1450
1451I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
1452
1424/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ 1453/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
1425struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { 1454struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
1426 u8 tc_valid_bits; 1455 u8 tc_valid_bits;
@@ -1432,6 +1461,9 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
1432 u8 reserved1[28]; 1461 u8 reserved1[28];
1433}; 1462};
1434 1463
1464I40E_CHECK_STRUCT_LEN(0x40,
1465 i40e_aqc_configure_switching_comp_ets_bw_limit_data);
1466
1435/* Configure Switching Component Bandwidth Allocation per Tc 1467/* Configure Switching Component Bandwidth Allocation per Tc
1436 * (indirect 0x0417) 1468 * (indirect 0x0417)
1437 */ 1469 */
@@ -1443,6 +1475,8 @@ struct i40e_aqc_configure_switching_comp_bw_config_data {
1443 u8 reserved1[20]; 1475 u8 reserved1[20];
1444}; 1476};
1445 1477
1478I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
1479
1446/* Query Switching Component Configuration (indirect 0x0418) */ 1480/* Query Switching Component Configuration (indirect 0x0418) */
1447struct i40e_aqc_query_switching_comp_ets_config_resp { 1481struct i40e_aqc_query_switching_comp_ets_config_resp {
1448 u8 tc_valid_bits; 1482 u8 tc_valid_bits;
@@ -1453,6 +1487,8 @@ struct i40e_aqc_query_switching_comp_ets_config_resp {
1453 u8 reserved2[23]; 1487 u8 reserved2[23];
1454}; 1488};
1455 1489
1490I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
1491
1456/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ 1492/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
1457struct i40e_aqc_query_port_ets_config_resp { 1493struct i40e_aqc_query_port_ets_config_resp {
1458 u8 reserved[4]; 1494 u8 reserved[4];
@@ -1468,6 +1504,8 @@ struct i40e_aqc_query_port_ets_config_resp {
1468 u8 reserved3[32]; 1504 u8 reserved3[32];
1469}; 1505};
1470 1506
1507I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
1508
1471/* Query Switching Component Bandwidth Allocation per Traffic Type 1509/* Query Switching Component Bandwidth Allocation per Traffic Type
1472 * (indirect 0x041A) 1510 * (indirect 0x041A)
1473 */ 1511 */
@@ -1482,6 +1520,8 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
1482 __le16 tc_bw_max[2]; 1520 __le16 tc_bw_max[2];
1483}; 1521};
1484 1522
1523I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
1524
1485/* Suspend/resume port TX traffic 1525/* Suspend/resume port TX traffic
1486 * (direct 0x041B and 0x041C) uses the generic SEID struct 1526 * (direct 0x041B and 0x041C) uses the generic SEID struct
1487 */ 1527 */
@@ -1495,6 +1535,8 @@ struct i40e_aqc_configure_partition_bw_data {
1495 u8 max_bw[16]; /* bandwidth limit */ 1535 u8 max_bw[16]; /* bandwidth limit */
1496}; 1536};
1497 1537
1538I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
1539
1498/* Get and set the active HMC resource profile and status. 1540/* Get and set the active HMC resource profile and status.
1499 * (direct 0x0500) and (direct 0x0501) 1541 * (direct 0x0500) and (direct 0x0501)
1500 */ 1542 */
@@ -1577,6 +1619,8 @@ struct i40e_aqc_module_desc {
1577 u8 reserved2[8]; 1619 u8 reserved2[8];
1578}; 1620};
1579 1621
1622I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
1623
1580struct i40e_aq_get_phy_abilities_resp { 1624struct i40e_aq_get_phy_abilities_resp {
1581 __le32 phy_type; /* bitmap using the above enum for offsets */ 1625 __le32 phy_type; /* bitmap using the above enum for offsets */
1582 u8 link_speed; /* bitmap using the above enum bit patterns */ 1626 u8 link_speed; /* bitmap using the above enum bit patterns */
@@ -1605,6 +1649,8 @@ struct i40e_aq_get_phy_abilities_resp {
1605 struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; 1649 struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
1606}; 1650};
1607 1651
1652I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
1653
1608/* Set PHY Config (direct 0x0601) */ 1654/* Set PHY Config (direct 0x0601) */
1609struct i40e_aq_set_phy_config { /* same bits as above in all */ 1655struct i40e_aq_set_phy_config { /* same bits as above in all */
1610 __le32 phy_type; 1656 __le32 phy_type;
@@ -1788,12 +1834,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
1788/* NVM Config Read (indirect 0x0704) */ 1834/* NVM Config Read (indirect 0x0704) */
1789struct i40e_aqc_nvm_config_read { 1835struct i40e_aqc_nvm_config_read {
1790 __le16 cmd_flags; 1836 __le16 cmd_flags;
1791#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 1837#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
1792#define ANVM_READ_SINGLE_FEATURE 0 1838#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
1793#define ANVM_READ_MULTIPLE_FEATURES 1 1839#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
1794 __le16 element_count; 1840 __le16 element_count;
1795 __le16 element_id; /* Feature/field ID */ 1841 __le16 element_id; /* Feature/field ID */
1796 u8 reserved[2]; 1842 __le16 element_id_msw; /* MSWord of field ID */
1797 __le32 address_high; 1843 __le32 address_high;
1798 __le32 address_low; 1844 __le32 address_low;
1799}; 1845};
@@ -1811,21 +1857,32 @@ struct i40e_aqc_nvm_config_write {
1811 1857
1812I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); 1858I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
1813 1859
1860/* Used for 0x0704 as well as for 0x0705 commands */
1861#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
1862#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
1863 (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
1864#define I40E_AQ_ANVM_FEATURE 0
1865#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
1814struct i40e_aqc_nvm_config_data_feature { 1866struct i40e_aqc_nvm_config_data_feature {
1815 __le16 feature_id; 1867 __le16 feature_id;
1816 __le16 instance_id; 1868#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
1869#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
1870#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
1817 __le16 feature_options; 1871 __le16 feature_options;
1818 __le16 feature_selection; 1872 __le16 feature_selection;
1819}; 1873};
1820 1874
1875I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
1876
1821struct i40e_aqc_nvm_config_data_immediate_field { 1877struct i40e_aqc_nvm_config_data_immediate_field {
1822#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2 1878 __le32 field_id;
1823 __le16 field_id; 1879 __le32 field_value;
1824 __le16 instance_id;
1825 __le16 field_options; 1880 __le16 field_options;
1826 __le16 field_value; 1881 __le16 reserved;
1827}; 1882};
1828 1883
1884I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
1885
1829/* Send to PF command (indirect 0x0801) id is only used by PF 1886/* Send to PF command (indirect 0x0801) id is only used by PF
1830 * Send to VF command (indirect 0x0802) id is only used by PF 1887 * Send to VF command (indirect 0x0802) id is only used by PF
1831 * Send to Peer PF command (indirect 0x0803) 1888 * Send to Peer PF command (indirect 0x0803)
@@ -2082,7 +2139,8 @@ struct i40e_aqc_oem_param_change {
2082#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 2139#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
2083#define I40E_AQ_OEM_PARAM_MAC 2 2140#define I40E_AQ_OEM_PARAM_MAC 2
2084 __le32 param_value1; 2141 __le32 param_value1;
2085 u8 param_value2[8]; 2142 __le16 param_value2;
2143 u8 reserved[6];
2086}; 2144};
2087 2145
2088I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); 2146I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
@@ -2096,6 +2154,28 @@ struct i40e_aqc_oem_state_change {
2096 2154
2097I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); 2155I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
2098 2156
2157/* Initialize OCSD (0xFE02, direct) */
2158struct i40e_aqc_opc_oem_ocsd_initialize {
2159 u8 type_status;
2160 u8 reserved1[3];
2161 __le32 ocsd_memory_block_addr_high;
2162 __le32 ocsd_memory_block_addr_low;
2163 __le32 requested_update_interval;
2164};
2165
2166I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
2167
2168/* Initialize OCBB (0xFE03, direct) */
2169struct i40e_aqc_opc_oem_ocbb_initialize {
2170 u8 type_status;
2171 u8 reserved1[3];
2172 __le32 ocbb_memory_block_addr_high;
2173 __le32 ocbb_memory_block_addr_low;
2174 u8 reserved2[4];
2175};
2176
2177I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
2178
2099/* debug commands */ 2179/* debug commands */
2100 2180
2101/* get device id (0xFF00) uses the generic structure */ 2181/* get device id (0xFF00) uses the generic structure */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 04c7c1557a0c..29004382f462 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -192,6 +192,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
192 return le32_to_cpu(*(volatile __le32 *)head); 192 return le32_to_cpu(*(volatile __le32 *)head);
193} 193}
194 194
195#define WB_STRIDE 0x3
196
195/** 197/**
196 * i40e_clean_tx_irq - Reclaim resources after transmit completes 198 * i40e_clean_tx_irq - Reclaim resources after transmit completes
197 * @tx_ring: tx ring to clean 199 * @tx_ring: tx ring to clean
@@ -293,6 +295,14 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
293 tx_ring->q_vector->tx.total_bytes += total_bytes; 295 tx_ring->q_vector->tx.total_bytes += total_bytes;
294 tx_ring->q_vector->tx.total_packets += total_packets; 296 tx_ring->q_vector->tx.total_packets += total_packets;
295 297
298 if (budget &&
299 !((i & WB_STRIDE) == WB_STRIDE) &&
300 !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
301 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
302 tx_ring->arm_wb = true;
303 else
304 tx_ring->arm_wb = false;
305
296 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { 306 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
297 /* schedule immediate reset if we believe we hung */ 307 /* schedule immediate reset if we believe we hung */
298 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" 308 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -344,6 +354,24 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
344} 354}
345 355
346/** 356/**
357 * i40e_force_wb -Arm hardware to do a wb on noncache aligned descriptors
358 * @vsi: the VSI we care about
359 * @q_vector: the vector on which to force writeback
360 *
361 **/
362static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
363{
364 u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
365 I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
366 I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
367 /* allow 00 to be written to the index */
368
369 wr32(&vsi->back->hw,
370 I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
371 val);
372}
373
374/**
347 * i40e_set_new_dynamic_itr - Find new ITR level 375 * i40e_set_new_dynamic_itr - Find new ITR level
348 * @rc: structure containing ring performance data 376 * @rc: structure containing ring performance data
349 * 377 *
@@ -568,6 +596,8 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
568 if (!rx_ring->rx_bi) 596 if (!rx_ring->rx_bi)
569 goto err; 597 goto err;
570 598
599 u64_stats_init(&rx_ring->syncp);
600
571 /* Round up to nearest 4K */ 601 /* Round up to nearest 4K */
572 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) 602 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
573 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) 603 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
@@ -1065,6 +1095,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
1065 struct i40e_vsi *vsi = q_vector->vsi; 1095 struct i40e_vsi *vsi = q_vector->vsi;
1066 struct i40e_ring *ring; 1096 struct i40e_ring *ring;
1067 bool clean_complete = true; 1097 bool clean_complete = true;
1098 bool arm_wb = false;
1068 int budget_per_ring; 1099 int budget_per_ring;
1069 1100
1070 if (test_bit(__I40E_DOWN, &vsi->state)) { 1101 if (test_bit(__I40E_DOWN, &vsi->state)) {
@@ -1075,8 +1106,10 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
1075 /* Since the actual Tx work is minimal, we can give the Tx a larger 1106 /* Since the actual Tx work is minimal, we can give the Tx a larger
1076 * budget and be more aggressive about cleaning up the Tx descriptors. 1107 * budget and be more aggressive about cleaning up the Tx descriptors.
1077 */ 1108 */
1078 i40e_for_each_ring(ring, q_vector->tx) 1109 i40e_for_each_ring(ring, q_vector->tx) {
1079 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); 1110 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
1111 arm_wb |= ring->arm_wb;
1112 }
1080 1113
1081 /* We attempt to distribute budget to each Rx queue fairly, but don't 1114 /* We attempt to distribute budget to each Rx queue fairly, but don't
1082 * allow the budget to go below 1 because that would exit polling early. 1115 * allow the budget to go below 1 because that would exit polling early.
@@ -1087,8 +1120,11 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
1087 clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); 1120 clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
1088 1121
1089 /* If work not completed, return budget and polling will return */ 1122 /* If work not completed, return budget and polling will return */
1090 if (!clean_complete) 1123 if (!clean_complete) {
1124 if (arm_wb)
1125 i40e_force_wb(vsi, q_vector);
1091 return budget; 1126 return budget;
1127 }
1092 1128
1093 /* Work is done so exit the polling mode and re-enable the interrupt */ 1129 /* Work is done so exit the polling mode and re-enable the interrupt */
1094 napi_complete(napi); 1130 napi_complete(napi);
@@ -1122,8 +1158,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1122 u32 tx_flags = 0; 1158 u32 tx_flags = 0;
1123 1159
1124 /* if we have a HW VLAN tag being added, default to the HW one */ 1160 /* if we have a HW VLAN tag being added, default to the HW one */
1125 if (vlan_tx_tag_present(skb)) { 1161 if (skb_vlan_tag_present(skb)) {
1126 tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; 1162 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
1127 tx_flags |= I40E_TX_FLAGS_HW_VLAN; 1163 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
1128 /* else if it is a SW VLAN, check the next protocol and store the tag */ 1164 /* else if it is a SW VLAN, check the next protocol and store the tag */
1129 } else if (protocol == htons(ETH_P_8021Q)) { 1165 } else if (protocol == htons(ETH_P_8021Q)) {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c7f29626eada..4e15903b2b6d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -238,6 +238,7 @@ struct i40e_ring {
238 u8 atr_count; 238 u8 atr_count;
239 239
240 bool ring_active; /* is ring online or not */ 240 bool ring_active; /* is ring online or not */
241 bool arm_wb; /* do something to arm write back */
241 242
242 /* stats structs */ 243 /* stats structs */
243 struct i40e_queue_stats stats; 244 struct i40e_queue_stats stats;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 68aec11f6523..3d0fdaab5cc8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -211,6 +211,7 @@ struct i40e_hw_capabilities {
211 bool evb_802_1_qbh; /* Bridge Port Extension */ 211 bool evb_802_1_qbh; /* Bridge Port Extension */
212 bool dcb; 212 bool dcb;
213 bool fcoe; 213 bool fcoe;
214 bool iscsi; /* Indicates iSCSI enabled */
214 bool mfp_mode_1; 215 bool mfp_mode_1;
215 bool mgmt_cem; 216 bool mgmt_cem;
216 bool ieee_1588; 217 bool ieee_1588;
@@ -425,7 +426,7 @@ struct i40e_hw {
425 u8 __iomem *hw_addr; 426 u8 __iomem *hw_addr;
426 void *back; 427 void *back;
427 428
428 /* function pointer structs */ 429 /* subsystem structs */
429 struct i40e_phy_info phy; 430 struct i40e_phy_info phy;
430 struct i40e_mac_info mac; 431 struct i40e_mac_info mac;
431 struct i40e_bus_info bus; 432 struct i40e_bus_info bus;
@@ -452,6 +453,11 @@ struct i40e_hw {
452 u8 pf_id; 453 u8 pf_id;
453 u16 main_vsi_seid; 454 u16 main_vsi_seid;
454 455
456 /* for multi-function MACs */
457 u16 partition_id;
458 u16 num_partitions;
459 u16 num_ports;
460
455 /* Closest numa node to the device */ 461 /* Closest numa node to the device */
456 u16 numa_node; 462 u16 numa_node;
457 463
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index cabaf599f562..8d8c201c63c1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
36static const char i40evf_driver_string[] = 36static const char i40evf_driver_string[] =
37 "Intel(R) XL710/X710 Virtual Function Network Driver"; 37 "Intel(R) XL710/X710 Virtual Function Network Driver";
38 38
39#define DRV_VERSION "1.0.6" 39#define DRV_VERSION "1.2.0"
40const char i40evf_driver_version[] = DRV_VERSION; 40const char i40evf_driver_version[] = DRV_VERSION;
41static const char i40evf_copyright[] = 41static const char i40evf_copyright[] =
42 "Copyright (c) 2013 - 2014 Intel Corporation."; 42 "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -313,10 +313,6 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
313 val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; 313 val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
314 wr32(hw, I40E_VFINT_DYN_CTL01, val); 314 wr32(hw, I40E_VFINT_DYN_CTL01, val);
315 315
316 /* re-enable interrupt causes */
317 wr32(hw, I40E_VFINT_ICR0_ENA1, ena_mask);
318 wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
319
320 /* schedule work on the private workqueue */ 316 /* schedule work on the private workqueue */
321 schedule_work(&adapter->adminq_task); 317 schedule_work(&adapter->adminq_task);
322 318
@@ -947,30 +943,6 @@ static int i40evf_up_complete(struct i40evf_adapter *adapter)
947} 943}
948 944
949/** 945/**
950 * i40evf_clean_all_rx_rings - Free Rx Buffers for all queues
951 * @adapter: board private structure
952 **/
953static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
954{
955 int i;
956
957 for (i = 0; i < adapter->num_active_queues; i++)
958 i40evf_clean_rx_ring(adapter->rx_rings[i]);
959}
960
961/**
962 * i40evf_clean_all_tx_rings - Free Tx Buffers for all queues
963 * @adapter: board private structure
964 **/
965static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
966{
967 int i;
968
969 for (i = 0; i < adapter->num_active_queues; i++)
970 i40evf_clean_tx_ring(adapter->tx_rings[i]);
971}
972
973/**
974 * i40e_down - Shutdown the connection processing 946 * i40e_down - Shutdown the connection processing
975 * @adapter: board private structure 947 * @adapter: board private structure
976 **/ 948 **/
@@ -982,6 +954,12 @@ void i40evf_down(struct i40evf_adapter *adapter)
982 if (adapter->state == __I40EVF_DOWN) 954 if (adapter->state == __I40EVF_DOWN)
983 return; 955 return;
984 956
957 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
958 &adapter->crit_section))
959 usleep_range(500, 1000);
960
961 i40evf_irq_disable(adapter);
962
985 /* remove all MAC filters */ 963 /* remove all MAC filters */
986 list_for_each_entry(f, &adapter->mac_filter_list, list) { 964 list_for_each_entry(f, &adapter->mac_filter_list, list) {
987 f->remove = true; 965 f->remove = true;
@@ -992,25 +970,27 @@ void i40evf_down(struct i40evf_adapter *adapter)
992 } 970 }
993 if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && 971 if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
994 adapter->state != __I40EVF_RESETTING) { 972 adapter->state != __I40EVF_RESETTING) {
995 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; 973 /* cancel any current operation */
974 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
975 adapter->aq_pending = 0;
976 /* Schedule operations to close down the HW. Don't wait
977 * here for this to complete. The watchdog is still running
978 * and it will take care of this.
979 */
980 adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
996 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; 981 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
997 /* disable receives */
998 adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; 982 adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
999 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
1000 msleep(20);
1001 } 983 }
1002 netif_tx_disable(netdev); 984 netif_tx_disable(netdev);
1003 985
1004 netif_tx_stop_all_queues(netdev); 986 netif_tx_stop_all_queues(netdev);
1005 987
1006 i40evf_irq_disable(adapter);
1007
1008 i40evf_napi_disable_all(adapter); 988 i40evf_napi_disable_all(adapter);
1009 989
1010 netif_carrier_off(netdev); 990 msleep(20);
1011 991
1012 i40evf_clean_all_tx_rings(adapter); 992 netif_carrier_off(netdev);
1013 i40evf_clean_all_rx_rings(adapter); 993 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1014} 994}
1015 995
1016/** 996/**
@@ -1356,8 +1336,13 @@ static void i40evf_watchdog_task(struct work_struct *work)
1356 /* Process admin queue tasks. After init, everything gets done 1336 /* Process admin queue tasks. After init, everything gets done
1357 * here so we don't race on the admin queue. 1337 * here so we don't race on the admin queue.
1358 */ 1338 */
1359 if (adapter->aq_pending) 1339 if (adapter->aq_pending) {
1340 if (!i40evf_asq_done(hw)) {
1341 dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
1342 i40evf_send_api_ver(adapter);
1343 }
1360 goto watchdog_done; 1344 goto watchdog_done;
1345 }
1361 1346
1362 if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) { 1347 if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
1363 i40evf_map_queues(adapter); 1348 i40evf_map_queues(adapter);
@@ -1401,11 +1386,14 @@ static void i40evf_watchdog_task(struct work_struct *work)
1401 1386
1402 if (adapter->state == __I40EVF_RUNNING) 1387 if (adapter->state == __I40EVF_RUNNING)
1403 i40evf_request_stats(adapter); 1388 i40evf_request_stats(adapter);
1404
1405 i40evf_irq_enable(adapter, true);
1406 i40evf_fire_sw_int(adapter, 0xFF);
1407
1408watchdog_done: 1389watchdog_done:
1390 if (adapter->state == __I40EVF_RUNNING) {
1391 i40evf_irq_enable_queues(adapter, ~0);
1392 i40evf_fire_sw_int(adapter, 0xFF);
1393 } else {
1394 i40evf_fire_sw_int(adapter, 0x1);
1395 }
1396
1409 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); 1397 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1410restart_watchdog: 1398restart_watchdog:
1411 if (adapter->state == __I40EVF_REMOVE) 1399 if (adapter->state == __I40EVF_REMOVE)
@@ -1633,17 +1621,17 @@ static void i40evf_adminq_task(struct work_struct *work)
1633 u16 pending; 1621 u16 pending;
1634 1622
1635 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) 1623 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
1636 return; 1624 goto out;
1637 1625
1638 event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; 1626 event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
1639 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); 1627 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1640 if (!event.msg_buf) 1628 if (!event.msg_buf)
1641 return; 1629 goto out;
1642 1630
1643 v_msg = (struct i40e_virtchnl_msg *)&event.desc; 1631 v_msg = (struct i40e_virtchnl_msg *)&event.desc;
1644 do { 1632 do {
1645 ret = i40evf_clean_arq_element(hw, &event, &pending); 1633 ret = i40evf_clean_arq_element(hw, &event, &pending);
1646 if (ret) 1634 if (ret || !v_msg->v_opcode)
1647 break; /* No event to process or error cleaning ARQ */ 1635 break; /* No event to process or error cleaning ARQ */
1648 1636
1649 i40evf_virtchnl_completion(adapter, v_msg->v_opcode, 1637 i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
@@ -1688,10 +1676,10 @@ static void i40evf_adminq_task(struct work_struct *work)
1688 if (oldval != val) 1676 if (oldval != val)
1689 wr32(hw, hw->aq.asq.len, val); 1677 wr32(hw, hw->aq.asq.len, val);
1690 1678
1679 kfree(event.msg_buf);
1680out:
1691 /* re-enable Admin queue interrupt cause */ 1681 /* re-enable Admin queue interrupt cause */
1692 i40evf_misc_irq_enable(adapter); 1682 i40evf_misc_irq_enable(adapter);
1693
1694 kfree(event.msg_buf);
1695} 1683}
1696 1684
1697/** 1685/**
@@ -2053,12 +2041,8 @@ static void i40evf_init_task(struct work_struct *work)
2053 /* aq msg sent, awaiting reply */ 2041 /* aq msg sent, awaiting reply */
2054 err = i40evf_verify_api_ver(adapter); 2042 err = i40evf_verify_api_ver(adapter);
2055 if (err) { 2043 if (err) {
2056 dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n", 2044 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
2057 err);
2058 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
2059 dev_info(&pdev->dev, "Resending request\n");
2060 err = i40evf_send_api_ver(adapter); 2045 err = i40evf_send_api_ver(adapter);
2061 }
2062 goto err; 2046 goto err;
2063 } 2047 }
2064 err = i40evf_send_vf_config_msg(adapter); 2048 err = i40evf_send_vf_config_msg(adapter);
@@ -2081,7 +2065,6 @@ static void i40evf_init_task(struct work_struct *work)
2081 } 2065 }
2082 err = i40evf_get_vf_config(adapter); 2066 err = i40evf_get_vf_config(adapter);
2083 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { 2067 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
2084 dev_info(&pdev->dev, "Resending VF config request\n");
2085 err = i40evf_send_vf_config_msg(adapter); 2068 err = i40evf_send_vf_config_msg(adapter);
2086 goto err; 2069 goto err;
2087 } 2070 }
@@ -2230,12 +2213,18 @@ err:
2230static void i40evf_shutdown(struct pci_dev *pdev) 2213static void i40evf_shutdown(struct pci_dev *pdev)
2231{ 2214{
2232 struct net_device *netdev = pci_get_drvdata(pdev); 2215 struct net_device *netdev = pci_get_drvdata(pdev);
2216 struct i40evf_adapter *adapter = netdev_priv(netdev);
2233 2217
2234 netif_device_detach(netdev); 2218 netif_device_detach(netdev);
2235 2219
2236 if (netif_running(netdev)) 2220 if (netif_running(netdev))
2237 i40evf_close(netdev); 2221 i40evf_close(netdev);
2238 2222
2223 /* Prevent the watchdog from running. */
2224 adapter->state = __I40EVF_REMOVE;
2225 adapter->aq_required = 0;
2226 adapter->aq_pending = 0;
2227
2239#ifdef CONFIG_PM 2228#ifdef CONFIG_PM
2240 pci_save_state(pdev); 2229 pci_save_state(pdev);
2241 2230
@@ -2448,7 +2437,18 @@ static void i40evf_remove(struct pci_dev *pdev)
2448 unregister_netdev(netdev); 2437 unregister_netdev(netdev);
2449 adapter->netdev_registered = false; 2438 adapter->netdev_registered = false;
2450 } 2439 }
2440
2441 /* Shut down all the garbage mashers on the detention level */
2451 adapter->state = __I40EVF_REMOVE; 2442 adapter->state = __I40EVF_REMOVE;
2443 adapter->aq_required = 0;
2444 adapter->aq_pending = 0;
2445 i40evf_request_reset(adapter);
2446 msleep(20);
2447 /* If the FW isn't responding, kick it once, but only once. */
2448 if (!i40evf_asq_done(hw)) {
2449 i40evf_request_reset(adapter);
2450 msleep(20);
2451 }
2452 2452
2453 if (adapter->msix_entries) { 2453 if (adapter->msix_entries) {
2454 i40evf_misc_irq_disable(adapter); 2454 i40evf_misc_irq_disable(adapter);
@@ -2477,6 +2477,10 @@ static void i40evf_remove(struct pci_dev *pdev)
2477 list_del(&f->list); 2477 list_del(&f->list);
2478 kfree(f); 2478 kfree(f);
2479 } 2479 }
2480 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
2481 list_del(&f->list);
2482 kfree(f);
2483 }
2480 2484
2481 free_netdev(netdev); 2485 free_netdev(netdev);
2482 2486
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 5fde5a7f4591..3f0c85ecbca6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -715,14 +715,14 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
715 } 715 }
716 return; 716 return;
717 } 717 }
718 if (v_opcode != adapter->current_op)
719 dev_info(&adapter->pdev->dev, "Pending op is %d, received %d\n",
720 adapter->current_op, v_opcode);
721 if (v_retval) { 718 if (v_retval) {
722 dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n", 719 dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
723 __func__, v_retval, v_opcode); 720 __func__, v_retval, v_opcode);
724 } 721 }
725 switch (v_opcode) { 722 switch (v_opcode) {
723 case I40E_VIRTCHNL_OP_VERSION:
724 /* no action, but also not an error */
725 break;
726 case I40E_VIRTCHNL_OP_GET_STATS: { 726 case I40E_VIRTCHNL_OP_GET_STATS: {
727 struct i40e_eth_stats *stats = 727 struct i40e_eth_stats *stats =
728 (struct i40e_eth_stats *)msg; 728 (struct i40e_eth_stats *)msg;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 82d891e183b1..c2bd4f98a837 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -29,7 +29,7 @@
29#include "e1000_mac.h" 29#include "e1000_mac.h"
30#include "e1000_82575.h" 30#include "e1000_82575.h"
31 31
32#include <linux/clocksource.h> 32#include <linux/timecounter.h>
33#include <linux/net_tstamp.h> 33#include <linux/net_tstamp.h>
34#include <linux/ptp_clock_kernel.h> 34#include <linux/ptp_clock_kernel.h>
35#include <linux/bitops.h> 35#include <linux/bitops.h>
@@ -343,6 +343,9 @@ struct hwmon_buff {
343 }; 343 };
344#endif 344#endif
345 345
346#define IGB_N_EXTTS 2
347#define IGB_N_PEROUT 2
348#define IGB_N_SDP 4
346#define IGB_RETA_SIZE 128 349#define IGB_RETA_SIZE 128
347 350
348/* board specific private data structure */ 351/* board specific private data structure */
@@ -439,6 +442,12 @@ struct igb_adapter {
439 u32 tx_hwtstamp_timeouts; 442 u32 tx_hwtstamp_timeouts;
440 u32 rx_hwtstamp_cleared; 443 u32 rx_hwtstamp_cleared;
441 444
445 struct ptp_pin_desc sdp_config[IGB_N_SDP];
446 struct {
447 struct timespec start;
448 struct timespec period;
449 } perout[IGB_N_PEROUT];
450
442 char fw_version[32]; 451 char fw_version[32];
443#ifdef CONFIG_IGB_HWMON 452#ifdef CONFIG_IGB_HWMON
444 struct hwmon_buff *igb_hwmon_buff; 453 struct hwmon_buff *igb_hwmon_buff;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ff59897a9463..f366b3b96d03 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5035,9 +5035,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
5035 5035
5036 skb_tx_timestamp(skb); 5036 skb_tx_timestamp(skb);
5037 5037
5038 if (vlan_tx_tag_present(skb)) { 5038 if (skb_vlan_tag_present(skb)) {
5039 tx_flags |= IGB_TX_FLAGS_VLAN; 5039 tx_flags |= IGB_TX_FLAGS_VLAN;
5040 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); 5040 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
5041 } 5041 }
5042 5042
5043 /* record initial flags and protocol */ 5043 /* record initial flags and protocol */
@@ -5384,6 +5384,80 @@ void igb_update_stats(struct igb_adapter *adapter,
5384 } 5384 }
5385} 5385}
5386 5386
5387static void igb_tsync_interrupt(struct igb_adapter *adapter)
5388{
5389 struct e1000_hw *hw = &adapter->hw;
5390 struct ptp_clock_event event;
5391 struct timespec ts;
5392 u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
5393
5394 if (tsicr & TSINTR_SYS_WRAP) {
5395 event.type = PTP_CLOCK_PPS;
5396 if (adapter->ptp_caps.pps)
5397 ptp_clock_event(adapter->ptp_clock, &event);
5398 else
5399 dev_err(&adapter->pdev->dev, "unexpected SYS WRAP");
5400 ack |= TSINTR_SYS_WRAP;
5401 }
5402
5403 if (tsicr & E1000_TSICR_TXTS) {
5404 /* retrieve hardware timestamp */
5405 schedule_work(&adapter->ptp_tx_work);
5406 ack |= E1000_TSICR_TXTS;
5407 }
5408
5409 if (tsicr & TSINTR_TT0) {
5410 spin_lock(&adapter->tmreg_lock);
5411 ts = timespec_add(adapter->perout[0].start,
5412 adapter->perout[0].period);
5413 wr32(E1000_TRGTTIML0, ts.tv_nsec);
5414 wr32(E1000_TRGTTIMH0, ts.tv_sec);
5415 tsauxc = rd32(E1000_TSAUXC);
5416 tsauxc |= TSAUXC_EN_TT0;
5417 wr32(E1000_TSAUXC, tsauxc);
5418 adapter->perout[0].start = ts;
5419 spin_unlock(&adapter->tmreg_lock);
5420 ack |= TSINTR_TT0;
5421 }
5422
5423 if (tsicr & TSINTR_TT1) {
5424 spin_lock(&adapter->tmreg_lock);
5425 ts = timespec_add(adapter->perout[1].start,
5426 adapter->perout[1].period);
5427 wr32(E1000_TRGTTIML1, ts.tv_nsec);
5428 wr32(E1000_TRGTTIMH1, ts.tv_sec);
5429 tsauxc = rd32(E1000_TSAUXC);
5430 tsauxc |= TSAUXC_EN_TT1;
5431 wr32(E1000_TSAUXC, tsauxc);
5432 adapter->perout[1].start = ts;
5433 spin_unlock(&adapter->tmreg_lock);
5434 ack |= TSINTR_TT1;
5435 }
5436
5437 if (tsicr & TSINTR_AUTT0) {
5438 nsec = rd32(E1000_AUXSTMPL0);
5439 sec = rd32(E1000_AUXSTMPH0);
5440 event.type = PTP_CLOCK_EXTTS;
5441 event.index = 0;
5442 event.timestamp = sec * 1000000000ULL + nsec;
5443 ptp_clock_event(adapter->ptp_clock, &event);
5444 ack |= TSINTR_AUTT0;
5445 }
5446
5447 if (tsicr & TSINTR_AUTT1) {
5448 nsec = rd32(E1000_AUXSTMPL1);
5449 sec = rd32(E1000_AUXSTMPH1);
5450 event.type = PTP_CLOCK_EXTTS;
5451 event.index = 1;
5452 event.timestamp = sec * 1000000000ULL + nsec;
5453 ptp_clock_event(adapter->ptp_clock, &event);
5454 ack |= TSINTR_AUTT1;
5455 }
5456
5457 /* acknowledge the interrupts */
5458 wr32(E1000_TSICR, ack);
5459}
5460
5387static irqreturn_t igb_msix_other(int irq, void *data) 5461static irqreturn_t igb_msix_other(int irq, void *data)
5388{ 5462{
5389 struct igb_adapter *adapter = data; 5463 struct igb_adapter *adapter = data;
@@ -5415,16 +5489,8 @@ static irqreturn_t igb_msix_other(int irq, void *data)
5415 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5489 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5416 } 5490 }
5417 5491
5418 if (icr & E1000_ICR_TS) { 5492 if (icr & E1000_ICR_TS)
5419 u32 tsicr = rd32(E1000_TSICR); 5493 igb_tsync_interrupt(adapter);
5420
5421 if (tsicr & E1000_TSICR_TXTS) {
5422 /* acknowledge the interrupt */
5423 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5424 /* retrieve hardware timestamp */
5425 schedule_work(&adapter->ptp_tx_work);
5426 }
5427 }
5428 5494
5429 wr32(E1000_EIMS, adapter->eims_other); 5495 wr32(E1000_EIMS, adapter->eims_other);
5430 5496
@@ -6011,8 +6077,12 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
6011 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; 6077 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
6012 6078
6013 /* reply to reset with ack and vf mac address */ 6079 /* reply to reset with ack and vf mac address */
6014 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; 6080 if (!is_zero_ether_addr(vf_mac)) {
6015 memcpy(addr, vf_mac, ETH_ALEN); 6081 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
6082 memcpy(addr, vf_mac, ETH_ALEN);
6083 } else {
6084 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
6085 }
6016 igb_write_mbx(hw, msgbuf, 3, vf); 6086 igb_write_mbx(hw, msgbuf, 3, vf);
6017} 6087}
6018 6088
@@ -6203,16 +6273,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
6203 mod_timer(&adapter->watchdog_timer, jiffies + 1); 6273 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6204 } 6274 }
6205 6275
6206 if (icr & E1000_ICR_TS) { 6276 if (icr & E1000_ICR_TS)
6207 u32 tsicr = rd32(E1000_TSICR); 6277 igb_tsync_interrupt(adapter);
6208
6209 if (tsicr & E1000_TSICR_TXTS) {
6210 /* acknowledge the interrupt */
6211 wr32(E1000_TSICR, E1000_TSICR_TXTS);
6212 /* retrieve hardware timestamp */
6213 schedule_work(&adapter->ptp_tx_work);
6214 }
6215 }
6216 6278
6217 napi_schedule(&q_vector->napi); 6279 napi_schedule(&q_vector->napi);
6218 6280
@@ -6257,16 +6319,8 @@ static irqreturn_t igb_intr(int irq, void *data)
6257 mod_timer(&adapter->watchdog_timer, jiffies + 1); 6319 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6258 } 6320 }
6259 6321
6260 if (icr & E1000_ICR_TS) { 6322 if (icr & E1000_ICR_TS)
6261 u32 tsicr = rd32(E1000_TSICR); 6323 igb_tsync_interrupt(adapter);
6262
6263 if (tsicr & E1000_TSICR_TXTS) {
6264 /* acknowledge the interrupt */
6265 wr32(E1000_TSICR, E1000_TSICR_TXTS);
6266 /* retrieve hardware timestamp */
6267 schedule_work(&adapter->ptp_tx_work);
6268 }
6269 }
6270 6324
6271 napi_schedule(&q_vector->napi); 6325 napi_schedule(&q_vector->napi);
6272 6326
@@ -6527,15 +6581,17 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6527 DMA_FROM_DEVICE); 6581 DMA_FROM_DEVICE);
6528} 6582}
6529 6583
6584static inline bool igb_page_is_reserved(struct page *page)
6585{
6586 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
6587}
6588
6530static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, 6589static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6531 struct page *page, 6590 struct page *page,
6532 unsigned int truesize) 6591 unsigned int truesize)
6533{ 6592{
6534 /* avoid re-using remote pages */ 6593 /* avoid re-using remote pages */
6535 if (unlikely(page_to_nid(page) != numa_node_id())) 6594 if (unlikely(igb_page_is_reserved(page)))
6536 return false;
6537
6538 if (unlikely(page->pfmemalloc))
6539 return false; 6595 return false;
6540 6596
6541#if (PAGE_SIZE < 8192) 6597#if (PAGE_SIZE < 8192)
@@ -6545,22 +6601,19 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6545 6601
6546 /* flip page offset to other buffer */ 6602 /* flip page offset to other buffer */
6547 rx_buffer->page_offset ^= IGB_RX_BUFSZ; 6603 rx_buffer->page_offset ^= IGB_RX_BUFSZ;
6548
6549 /* Even if we own the page, we are not allowed to use atomic_set()
6550 * This would break get_page_unless_zero() users.
6551 */
6552 atomic_inc(&page->_count);
6553#else 6604#else
6554 /* move offset up to the next cache line */ 6605 /* move offset up to the next cache line */
6555 rx_buffer->page_offset += truesize; 6606 rx_buffer->page_offset += truesize;
6556 6607
6557 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) 6608 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
6558 return false; 6609 return false;
6559
6560 /* bump ref count on page before it is given to the stack */
6561 get_page(page);
6562#endif 6610#endif
6563 6611
6612 /* Even if we own the page, we are not allowed to use atomic_set()
6613 * This would break get_page_unless_zero() users.
6614 */
6615 atomic_inc(&page->_count);
6616
6564 return true; 6617 return true;
6565} 6618}
6566 6619
@@ -6603,13 +6656,12 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6603 6656
6604 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 6657 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
6605 6658
6606 /* we can reuse buffer as-is, just make sure it is local */ 6659 /* page is not reserved, we can reuse buffer as-is */
6607 if (likely((page_to_nid(page) == numa_node_id()) && 6660 if (likely(!igb_page_is_reserved(page)))
6608 !page->pfmemalloc))
6609 return true; 6661 return true;
6610 6662
6611 /* this page cannot be reused so discard it */ 6663 /* this page cannot be reused so discard it */
6612 put_page(page); 6664 __free_page(page);
6613 return false; 6665 return false;
6614 } 6666 }
6615 6667
@@ -6627,7 +6679,6 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6627 struct page *page; 6679 struct page *page;
6628 6680
6629 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 6681 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
6630
6631 page = rx_buffer->page; 6682 page = rx_buffer->page;
6632 prefetchw(page); 6683 prefetchw(page);
6633 6684
@@ -7042,8 +7093,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
7042 i -= rx_ring->count; 7093 i -= rx_ring->count;
7043 } 7094 }
7044 7095
7045 /* clear the hdr_addr for the next_to_use descriptor */ 7096 /* clear the status bits for the next_to_use descriptor */
7046 rx_desc->read.hdr_addr = 0; 7097 rx_desc->wb.upper.status_error = 0;
7047 7098
7048 cleaned_count--; 7099 cleaned_count--;
7049 } while (cleaned_count); 7100 } while (cleaned_count);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 794c139f0cc0..d20fc8ed11f1 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -256,14 +256,9 @@ static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta)
256 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, 256 struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
257 ptp_caps); 257 ptp_caps);
258 unsigned long flags; 258 unsigned long flags;
259 s64 now;
260 259
261 spin_lock_irqsave(&igb->tmreg_lock, flags); 260 spin_lock_irqsave(&igb->tmreg_lock, flags);
262 261 timecounter_adjtime(&igb->tc, delta);
263 now = timecounter_read(&igb->tc);
264 now += delta;
265 timecounter_init(&igb->tc, &igb->cc, now);
266
267 spin_unlock_irqrestore(&igb->tmreg_lock, flags); 262 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
268 263
269 return 0; 264 return 0;
@@ -360,12 +355,239 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
360 return 0; 355 return 0;
361} 356}
362 357
358static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext)
359{
360 u32 *ptr = pin < 2 ? ctrl : ctrl_ext;
361 u32 mask[IGB_N_SDP] = {
362 E1000_CTRL_SDP0_DIR,
363 E1000_CTRL_SDP1_DIR,
364 E1000_CTRL_EXT_SDP2_DIR,
365 E1000_CTRL_EXT_SDP3_DIR,
366 };
367
368 if (input)
369 *ptr &= ~mask[pin];
370 else
371 *ptr |= mask[pin];
372}
373
374static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
375{
376 struct e1000_hw *hw = &igb->hw;
377 u32 aux0_sel_sdp[IGB_N_SDP] = {
378 AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
379 };
380 u32 aux1_sel_sdp[IGB_N_SDP] = {
381 AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
382 };
383 u32 ts_sdp_en[IGB_N_SDP] = {
384 TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
385 };
386 u32 ctrl, ctrl_ext, tssdp = 0;
387
388 ctrl = rd32(E1000_CTRL);
389 ctrl_ext = rd32(E1000_CTRL_EXT);
390 tssdp = rd32(E1000_TSSDP);
391
392 igb_pin_direction(pin, 1, &ctrl, &ctrl_ext);
393
394 /* Make sure this pin is not enabled as an output. */
395 tssdp &= ~ts_sdp_en[pin];
396
397 if (chan == 1) {
398 tssdp &= ~AUX1_SEL_SDP3;
399 tssdp |= aux1_sel_sdp[pin] | AUX1_TS_SDP_EN;
400 } else {
401 tssdp &= ~AUX0_SEL_SDP3;
402 tssdp |= aux0_sel_sdp[pin] | AUX0_TS_SDP_EN;
403 }
404
405 wr32(E1000_TSSDP, tssdp);
406 wr32(E1000_CTRL, ctrl);
407 wr32(E1000_CTRL_EXT, ctrl_ext);
408}
409
410static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
411{
412 struct e1000_hw *hw = &igb->hw;
413 u32 aux0_sel_sdp[IGB_N_SDP] = {
414 AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
415 };
416 u32 aux1_sel_sdp[IGB_N_SDP] = {
417 AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
418 };
419 u32 ts_sdp_en[IGB_N_SDP] = {
420 TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
421 };
422 u32 ts_sdp_sel_tt0[IGB_N_SDP] = {
423 TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0,
424 TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0,
425 };
426 u32 ts_sdp_sel_tt1[IGB_N_SDP] = {
427 TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
428 TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
429 };
430 u32 ts_sdp_sel_clr[IGB_N_SDP] = {
431 TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
432 TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
433 };
434 u32 ctrl, ctrl_ext, tssdp = 0;
435
436 ctrl = rd32(E1000_CTRL);
437 ctrl_ext = rd32(E1000_CTRL_EXT);
438 tssdp = rd32(E1000_TSSDP);
439
440 igb_pin_direction(pin, 0, &ctrl, &ctrl_ext);
441
442 /* Make sure this pin is not enabled as an input. */
443 if ((tssdp & AUX0_SEL_SDP3) == aux0_sel_sdp[pin])
444 tssdp &= ~AUX0_TS_SDP_EN;
445
446 if ((tssdp & AUX1_SEL_SDP3) == aux1_sel_sdp[pin])
447 tssdp &= ~AUX1_TS_SDP_EN;
448
449 tssdp &= ~ts_sdp_sel_clr[pin];
450 if (chan == 1)
451 tssdp |= ts_sdp_sel_tt1[pin];
452 else
453 tssdp |= ts_sdp_sel_tt0[pin];
454
455 tssdp |= ts_sdp_en[pin];
456
457 wr32(E1000_TSSDP, tssdp);
458 wr32(E1000_CTRL, ctrl);
459 wr32(E1000_CTRL_EXT, ctrl_ext);
460}
461
462static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
463 struct ptp_clock_request *rq, int on)
464{
465 struct igb_adapter *igb =
466 container_of(ptp, struct igb_adapter, ptp_caps);
467 struct e1000_hw *hw = &igb->hw;
468 u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
469 unsigned long flags;
470 struct timespec ts;
471 int pin;
472 s64 ns;
473
474 switch (rq->type) {
475 case PTP_CLK_REQ_EXTTS:
476 if (on) {
477 pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
478 rq->extts.index);
479 if (pin < 0)
480 return -EBUSY;
481 }
482 if (rq->extts.index == 1) {
483 tsauxc_mask = TSAUXC_EN_TS1;
484 tsim_mask = TSINTR_AUTT1;
485 } else {
486 tsauxc_mask = TSAUXC_EN_TS0;
487 tsim_mask = TSINTR_AUTT0;
488 }
489 spin_lock_irqsave(&igb->tmreg_lock, flags);
490 tsauxc = rd32(E1000_TSAUXC);
491 tsim = rd32(E1000_TSIM);
492 if (on) {
493 igb_pin_extts(igb, rq->extts.index, pin);
494 tsauxc |= tsauxc_mask;
495 tsim |= tsim_mask;
496 } else {
497 tsauxc &= ~tsauxc_mask;
498 tsim &= ~tsim_mask;
499 }
500 wr32(E1000_TSAUXC, tsauxc);
501 wr32(E1000_TSIM, tsim);
502 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
503 return 0;
504
505 case PTP_CLK_REQ_PEROUT:
506 if (on) {
507 pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
508 rq->perout.index);
509 if (pin < 0)
510 return -EBUSY;
511 }
512 ts.tv_sec = rq->perout.period.sec;
513 ts.tv_nsec = rq->perout.period.nsec;
514 ns = timespec_to_ns(&ts);
515 ns = ns >> 1;
516 if (on && ns < 500000LL) {
517 /* 2k interrupts per second is an awful lot. */
518 return -EINVAL;
519 }
520 ts = ns_to_timespec(ns);
521 if (rq->perout.index == 1) {
522 tsauxc_mask = TSAUXC_EN_TT1;
523 tsim_mask = TSINTR_TT1;
524 trgttiml = E1000_TRGTTIML1;
525 trgttimh = E1000_TRGTTIMH1;
526 } else {
527 tsauxc_mask = TSAUXC_EN_TT0;
528 tsim_mask = TSINTR_TT0;
529 trgttiml = E1000_TRGTTIML0;
530 trgttimh = E1000_TRGTTIMH0;
531 }
532 spin_lock_irqsave(&igb->tmreg_lock, flags);
533 tsauxc = rd32(E1000_TSAUXC);
534 tsim = rd32(E1000_TSIM);
535 if (on) {
536 int i = rq->perout.index;
537
538 igb_pin_perout(igb, i, pin);
539 igb->perout[i].start.tv_sec = rq->perout.start.sec;
540 igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
541 igb->perout[i].period.tv_sec = ts.tv_sec;
542 igb->perout[i].period.tv_nsec = ts.tv_nsec;
543 wr32(trgttiml, rq->perout.start.sec);
544 wr32(trgttimh, rq->perout.start.nsec);
545 tsauxc |= tsauxc_mask;
546 tsim |= tsim_mask;
547 } else {
548 tsauxc &= ~tsauxc_mask;
549 tsim &= ~tsim_mask;
550 }
551 wr32(E1000_TSAUXC, tsauxc);
552 wr32(E1000_TSIM, tsim);
553 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
554 return 0;
555
556 case PTP_CLK_REQ_PPS:
557 spin_lock_irqsave(&igb->tmreg_lock, flags);
558 tsim = rd32(E1000_TSIM);
559 if (on)
560 tsim |= TSINTR_SYS_WRAP;
561 else
562 tsim &= ~TSINTR_SYS_WRAP;
563 wr32(E1000_TSIM, tsim);
564 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
565 return 0;
566 }
567
568 return -EOPNOTSUPP;
569}
570
363static int igb_ptp_feature_enable(struct ptp_clock_info *ptp, 571static int igb_ptp_feature_enable(struct ptp_clock_info *ptp,
364 struct ptp_clock_request *rq, int on) 572 struct ptp_clock_request *rq, int on)
365{ 573{
366 return -EOPNOTSUPP; 574 return -EOPNOTSUPP;
367} 575}
368 576
577static int igb_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
578 enum ptp_pin_function func, unsigned int chan)
579{
580 switch (func) {
581 case PTP_PF_NONE:
582 case PTP_PF_EXTTS:
583 case PTP_PF_PEROUT:
584 break;
585 case PTP_PF_PHYSYNC:
586 return -1;
587 }
588 return 0;
589}
590
369/** 591/**
370 * igb_ptp_tx_work 592 * igb_ptp_tx_work
371 * @work: pointer to work struct 593 * @work: pointer to work struct
@@ -756,6 +978,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
756{ 978{
757 struct e1000_hw *hw = &adapter->hw; 979 struct e1000_hw *hw = &adapter->hw;
758 struct net_device *netdev = adapter->netdev; 980 struct net_device *netdev = adapter->netdev;
981 int i;
759 982
760 switch (hw->mac.type) { 983 switch (hw->mac.type) {
761 case e1000_82576: 984 case e1000_82576:
@@ -770,7 +993,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
770 adapter->ptp_caps.settime = igb_ptp_settime_82576; 993 adapter->ptp_caps.settime = igb_ptp_settime_82576;
771 adapter->ptp_caps.enable = igb_ptp_feature_enable; 994 adapter->ptp_caps.enable = igb_ptp_feature_enable;
772 adapter->cc.read = igb_ptp_read_82576; 995 adapter->cc.read = igb_ptp_read_82576;
773 adapter->cc.mask = CLOCKSOURCE_MASK(64); 996 adapter->cc.mask = CYCLECOUNTER_MASK(64);
774 adapter->cc.mult = 1; 997 adapter->cc.mult = 1;
775 adapter->cc.shift = IGB_82576_TSYNC_SHIFT; 998 adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
776 /* Dial the nominal frequency. */ 999 /* Dial the nominal frequency. */
@@ -790,7 +1013,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
790 adapter->ptp_caps.settime = igb_ptp_settime_82576; 1013 adapter->ptp_caps.settime = igb_ptp_settime_82576;
791 adapter->ptp_caps.enable = igb_ptp_feature_enable; 1014 adapter->ptp_caps.enable = igb_ptp_feature_enable;
792 adapter->cc.read = igb_ptp_read_82580; 1015 adapter->cc.read = igb_ptp_read_82580;
793 adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580); 1016 adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
794 adapter->cc.mult = 1; 1017 adapter->cc.mult = 1;
795 adapter->cc.shift = 0; 1018 adapter->cc.shift = 0;
796 /* Enable the timer functions by clearing bit 31. */ 1019 /* Enable the timer functions by clearing bit 31. */
@@ -798,16 +1021,27 @@ void igb_ptp_init(struct igb_adapter *adapter)
798 break; 1021 break;
799 case e1000_i210: 1022 case e1000_i210:
800 case e1000_i211: 1023 case e1000_i211:
1024 for (i = 0; i < IGB_N_SDP; i++) {
1025 struct ptp_pin_desc *ppd = &adapter->sdp_config[i];
1026
1027 snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
1028 ppd->index = i;
1029 ppd->func = PTP_PF_NONE;
1030 }
801 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); 1031 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
802 adapter->ptp_caps.owner = THIS_MODULE; 1032 adapter->ptp_caps.owner = THIS_MODULE;
803 adapter->ptp_caps.max_adj = 62499999; 1033 adapter->ptp_caps.max_adj = 62499999;
804 adapter->ptp_caps.n_ext_ts = 0; 1034 adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
805 adapter->ptp_caps.pps = 0; 1035 adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
1036 adapter->ptp_caps.n_pins = IGB_N_SDP;
1037 adapter->ptp_caps.pps = 1;
1038 adapter->ptp_caps.pin_config = adapter->sdp_config;
806 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; 1039 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
807 adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; 1040 adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
808 adapter->ptp_caps.gettime = igb_ptp_gettime_i210; 1041 adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
809 adapter->ptp_caps.settime = igb_ptp_settime_i210; 1042 adapter->ptp_caps.settime = igb_ptp_settime_i210;
810 adapter->ptp_caps.enable = igb_ptp_feature_enable; 1043 adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
1044 adapter->ptp_caps.verify = igb_ptp_verify_pin;
811 /* Enable the timer functions by clearing bit 31. */ 1045 /* Enable the timer functions by clearing bit 31. */
812 wr32(E1000_TSAUXC, 0x0); 1046 wr32(E1000_TSAUXC, 0x0);
813 break; 1047 break;
@@ -905,6 +1139,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
905void igb_ptp_reset(struct igb_adapter *adapter) 1139void igb_ptp_reset(struct igb_adapter *adapter)
906{ 1140{
907 struct e1000_hw *hw = &adapter->hw; 1141 struct e1000_hw *hw = &adapter->hw;
1142 unsigned long flags;
908 1143
909 if (!(adapter->flags & IGB_FLAG_PTP)) 1144 if (!(adapter->flags & IGB_FLAG_PTP))
910 return; 1145 return;
@@ -912,6 +1147,8 @@ void igb_ptp_reset(struct igb_adapter *adapter)
912 /* reset the tstamp_config */ 1147 /* reset the tstamp_config */
913 igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); 1148 igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
914 1149
1150 spin_lock_irqsave(&adapter->tmreg_lock, flags);
1151
915 switch (adapter->hw.mac.type) { 1152 switch (adapter->hw.mac.type) {
916 case e1000_82576: 1153 case e1000_82576:
917 /* Dial the nominal frequency. */ 1154 /* Dial the nominal frequency. */
@@ -922,23 +1159,25 @@ void igb_ptp_reset(struct igb_adapter *adapter)
922 case e1000_i350: 1159 case e1000_i350:
923 case e1000_i210: 1160 case e1000_i210:
924 case e1000_i211: 1161 case e1000_i211:
925 /* Enable the timer functions and interrupts. */
926 wr32(E1000_TSAUXC, 0x0); 1162 wr32(E1000_TSAUXC, 0x0);
1163 wr32(E1000_TSSDP, 0x0);
927 wr32(E1000_TSIM, TSYNC_INTERRUPTS); 1164 wr32(E1000_TSIM, TSYNC_INTERRUPTS);
928 wr32(E1000_IMS, E1000_IMS_TS); 1165 wr32(E1000_IMS, E1000_IMS_TS);
929 break; 1166 break;
930 default: 1167 default:
931 /* No work to do. */ 1168 /* No work to do. */
932 return; 1169 goto out;
933 } 1170 }
934 1171
935 /* Re-initialize the timer. */ 1172 /* Re-initialize the timer. */
936 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { 1173 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
937 struct timespec ts = ktime_to_timespec(ktime_get_real()); 1174 struct timespec ts = ktime_to_timespec(ktime_get_real());
938 1175
939 igb_ptp_settime_i210(&adapter->ptp_caps, &ts); 1176 igb_ptp_write_i210(adapter, &ts);
940 } else { 1177 } else {
941 timecounter_init(&adapter->tc, &adapter->cc, 1178 timecounter_init(&adapter->tc, &adapter->cc,
942 ktime_to_ns(ktime_get_real())); 1179 ktime_to_ns(ktime_get_real()));
943 } 1180 }
1181out:
1182 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
944} 1183}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index edea13b0ee85..ebf9d4a42fdd 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2237,9 +2237,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2237 return NETDEV_TX_BUSY; 2237 return NETDEV_TX_BUSY;
2238 } 2238 }
2239 2239
2240 if (vlan_tx_tag_present(skb)) { 2240 if (skb_vlan_tag_present(skb)) {
2241 tx_flags |= IGBVF_TX_FLAGS_VLAN; 2241 tx_flags |= IGBVF_TX_FLAGS_VLAN;
2242 tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); 2242 tx_flags |= (skb_vlan_tag_get(skb) <<
2243 IGBVF_TX_FLAGS_VLAN_SHIFT);
2243 } 2244 }
2244 2245
2245 if (protocol == htons(ETH_P_IP)) 2246 if (protocol == htons(ETH_P_IP))
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index aa87605b144a..11a1bdbe3fd9 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1532,9 +1532,9 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1532 DESC_NEEDED))) 1532 DESC_NEEDED)))
1533 return NETDEV_TX_BUSY; 1533 return NETDEV_TX_BUSY;
1534 1534
1535 if (vlan_tx_tag_present(skb)) { 1535 if (skb_vlan_tag_present(skb)) {
1536 tx_flags |= IXGB_TX_FLAGS_VLAN; 1536 tx_flags |= IXGB_TX_FLAGS_VLAN;
1537 vlan_id = vlan_tx_tag_get(skb); 1537 vlan_id = skb_vlan_tag_get(skb);
1538 } 1538 }
1539 1539
1540 first = adapter->tx_ring.next_to_use; 1540 first = adapter->tx_ring.next_to_use;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b6137be43920..7dcbbec09a70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -38,7 +38,7 @@
38#include <linux/if_vlan.h> 38#include <linux/if_vlan.h>
39#include <linux/jiffies.h> 39#include <linux/jiffies.h>
40 40
41#include <linux/clocksource.h> 41#include <linux/timecounter.h>
42#include <linux/net_tstamp.h> 42#include <linux/net_tstamp.h>
43#include <linux/ptp_clock_kernel.h> 43#include <linux/ptp_clock_kernel.h>
44 44
@@ -76,6 +76,8 @@
76#define IXGBE_MAX_RXD 4096 76#define IXGBE_MAX_RXD 4096
77#define IXGBE_MIN_RXD 64 77#define IXGBE_MIN_RXD 64
78 78
79#define IXGBE_ETH_P_LLDP 0x88CC
80
79/* flow control */ 81/* flow control */
80#define IXGBE_MIN_FCRTL 0x40 82#define IXGBE_MIN_FCRTL 0x40
81#define IXGBE_MAX_FCRTL 0x7FF80 83#define IXGBE_MAX_FCRTL 0x7FF80
@@ -753,6 +755,7 @@ struct ixgbe_adapter {
753 u32 timer_event_accumulator; 755 u32 timer_event_accumulator;
754 u32 vferr_refcount; 756 u32 vferr_refcount;
755 struct ixgbe_mac_addr *mac_table; 757 struct ixgbe_mac_addr *mac_table;
758 u16 vxlan_port;
756 struct kobject *info_kobj; 759 struct kobject *info_kobj;
757#ifdef CONFIG_IXGBE_HWMON 760#ifdef CONFIG_IXGBE_HWMON
758 struct hwmon_buff *ixgbe_hwmon_buff; 761 struct hwmon_buff *ixgbe_hwmon_buff;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 67b02bde179e..70cc4c5c0a01 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -50,6 +50,7 @@
50#include <linux/if_bridge.h> 50#include <linux/if_bridge.h>
51#include <linux/prefetch.h> 51#include <linux/prefetch.h>
52#include <scsi/fc/fc_fcoe.h> 52#include <scsi/fc/fc_fcoe.h>
53#include <net/vxlan.h>
53 54
54#ifdef CONFIG_OF 55#ifdef CONFIG_OF
55#include <linux/of_net.h> 56#include <linux/of_net.h>
@@ -1396,12 +1397,23 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1396 union ixgbe_adv_rx_desc *rx_desc, 1397 union ixgbe_adv_rx_desc *rx_desc,
1397 struct sk_buff *skb) 1398 struct sk_buff *skb)
1398{ 1399{
1400 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1401 __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
1402 bool encap_pkt = false;
1403
1399 skb_checksum_none_assert(skb); 1404 skb_checksum_none_assert(skb);
1400 1405
1401 /* Rx csum disabled */ 1406 /* Rx csum disabled */
1402 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 1407 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1403 return; 1408 return;
1404 1409
1410 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) &&
1411 (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
1412 encap_pkt = true;
1413 skb->encapsulation = 1;
1414 skb->ip_summed = CHECKSUM_NONE;
1415 }
1416
1405 /* if IP and error */ 1417 /* if IP and error */
1406 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && 1418 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1407 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { 1419 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
@@ -1413,8 +1425,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1413 return; 1425 return;
1414 1426
1415 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { 1427 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1416 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1417
1418 /* 1428 /*
1419 * 82599 errata, UDP frames with a 0 checksum can be marked as 1429 * 82599 errata, UDP frames with a 0 checksum can be marked as
1420 * checksum errors. 1430 * checksum errors.
@@ -1429,6 +1439,17 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1429 1439
1430 /* It must be a TCP or UDP packet with a valid checksum */ 1440 /* It must be a TCP or UDP packet with a valid checksum */
1431 skb->ip_summed = CHECKSUM_UNNECESSARY; 1441 skb->ip_summed = CHECKSUM_UNNECESSARY;
1442 if (encap_pkt) {
1443 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1444 return;
1445
1446 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1447 ring->rx_stats.csum_err++;
1448 return;
1449 }
1450 /* If we checked the outer header let the stack know */
1451 skb->csum_level = 1;
1452 }
1432} 1453}
1433 1454
1434static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, 1455static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
@@ -3564,10 +3585,24 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3564 /* Enable MAC Anti-Spoofing */ 3585 /* Enable MAC Anti-Spoofing */
3565 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), 3586 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
3566 adapter->num_vfs); 3587 adapter->num_vfs);
3588
3589 /* Ensure LLDP is set for Ethertype Antispoofing if we will be
3590 * calling set_ethertype_anti_spoofing for each VF in loop below
3591 */
3592 if (hw->mac.ops.set_ethertype_anti_spoofing)
3593 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
3594 (IXGBE_ETQF_FILTER_EN | /* enable filter */
3595 IXGBE_ETQF_TX_ANTISPOOF | /* tx antispoof */
3596 IXGBE_ETH_P_LLDP)); /* LLDP eth type */
3597
3567 /* For VFs that have spoof checking turned off */ 3598 /* For VFs that have spoof checking turned off */
3568 for (i = 0; i < adapter->num_vfs; i++) { 3599 for (i = 0; i < adapter->num_vfs; i++) {
3569 if (!adapter->vfinfo[i].spoofchk_enabled) 3600 if (!adapter->vfinfo[i].spoofchk_enabled)
3570 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false); 3601 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
3602
3603 /* enable ethertype anti spoofing if hw supports it */
3604 if (hw->mac.ops.set_ethertype_anti_spoofing)
3605 hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
3571 } 3606 }
3572} 3607}
3573 3608
@@ -5627,6 +5662,10 @@ static int ixgbe_open(struct net_device *netdev)
5627 5662
5628 ixgbe_up_complete(adapter); 5663 ixgbe_up_complete(adapter);
5629 5664
5665#if IS_ENABLED(CONFIG_IXGBE_VXLAN)
5666 vxlan_get_rx_port(netdev);
5667
5668#endif
5630 return 0; 5669 return 0;
5631 5670
5632err_set_queues: 5671err_set_queues:
@@ -7217,8 +7256,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
7217 first->gso_segs = 1; 7256 first->gso_segs = 1;
7218 7257
7219 /* if we have a HW VLAN tag being added default to the HW one */ 7258 /* if we have a HW VLAN tag being added default to the HW one */
7220 if (vlan_tx_tag_present(skb)) { 7259 if (skb_vlan_tag_present(skb)) {
7221 tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; 7260 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
7222 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; 7261 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
7223 /* else if it is a SW VLAN check the next protocol and store the tag */ 7262 /* else if it is a SW VLAN check the next protocol and store the tag */
7224 } else if (protocol == htons(ETH_P_8021Q)) { 7263 } else if (protocol == htons(ETH_P_8021Q)) {
@@ -7771,6 +7810,64 @@ static int ixgbe_set_features(struct net_device *netdev,
7771 return 0; 7810 return 0;
7772} 7811}
7773 7812
7813/**
7814 * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
7815 * @dev: The port's netdev
7816 * @sa_family: Socket Family that VXLAN is notifiying us about
7817 * @port: New UDP port number that VXLAN started listening to
7818 **/
7819static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
7820 __be16 port)
7821{
7822 struct ixgbe_adapter *adapter = netdev_priv(dev);
7823 struct ixgbe_hw *hw = &adapter->hw;
7824 u16 new_port = ntohs(port);
7825
7826 if (sa_family == AF_INET6)
7827 return;
7828
7829 if (adapter->vxlan_port == new_port) {
7830 netdev_info(dev, "Port %d already offloaded\n", new_port);
7831 return;
7832 }
7833
7834 if (adapter->vxlan_port) {
7835 netdev_info(dev,
7836 "Hit Max num of UDP ports, not adding port %d\n",
7837 new_port);
7838 return;
7839 }
7840
7841 adapter->vxlan_port = new_port;
7842 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
7843}
7844
7845/**
7846 * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away
7847 * @dev: The port's netdev
7848 * @sa_family: Socket Family that VXLAN is notifying us about
7849 * @port: UDP port number that VXLAN stopped listening to
7850 **/
7851static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
7852 __be16 port)
7853{
7854 struct ixgbe_adapter *adapter = netdev_priv(dev);
7855 struct ixgbe_hw *hw = &adapter->hw;
7856 u16 new_port = ntohs(port);
7857
7858 if (sa_family == AF_INET6)
7859 return;
7860
7861 if (adapter->vxlan_port != new_port) {
7862 netdev_info(dev, "Port %d was not found, not deleting\n",
7863 new_port);
7864 return;
7865 }
7866
7867 adapter->vxlan_port = 0;
7868 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, 0);
7869}
7870
7774static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 7871static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7775 struct net_device *dev, 7872 struct net_device *dev,
7776 const unsigned char *addr, u16 vid, 7873 const unsigned char *addr, u16 vid,
@@ -7786,7 +7883,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7786} 7883}
7787 7884
7788static int ixgbe_ndo_bridge_setlink(struct net_device *dev, 7885static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
7789 struct nlmsghdr *nlh) 7886 struct nlmsghdr *nlh, u16 flags)
7790{ 7887{
7791 struct ixgbe_adapter *adapter = netdev_priv(dev); 7888 struct ixgbe_adapter *adapter = netdev_priv(dev);
7792 struct nlattr *attr, *br_spec; 7889 struct nlattr *attr, *br_spec;
@@ -7982,6 +8079,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7982 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, 8079 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
7983 .ndo_dfwd_add_station = ixgbe_fwd_add, 8080 .ndo_dfwd_add_station = ixgbe_fwd_add,
7984 .ndo_dfwd_del_station = ixgbe_fwd_del, 8081 .ndo_dfwd_del_station = ixgbe_fwd_del,
8082 .ndo_add_vxlan_port = ixgbe_add_vxlan_port,
8083 .ndo_del_vxlan_port = ixgbe_del_vxlan_port,
7985}; 8084};
7986 8085
7987/** 8086/**
@@ -8339,6 +8438,15 @@ skip_sriov:
8339 netdev->priv_flags |= IFF_UNICAST_FLT; 8438 netdev->priv_flags |= IFF_UNICAST_FLT;
8340 netdev->priv_flags |= IFF_SUPP_NOFCS; 8439 netdev->priv_flags |= IFF_SUPP_NOFCS;
8341 8440
8441 switch (adapter->hw.mac.type) {
8442 case ixgbe_mac_X550:
8443 case ixgbe_mac_X550EM_x:
8444 netdev->hw_enc_features |= NETIF_F_RXCSUM;
8445 break;
8446 default:
8447 break;
8448 }
8449
8342#ifdef CONFIG_IXGBE_DCB 8450#ifdef CONFIG_IXGBE_DCB
8343 netdev->dcbnl_ops = &dcbnl_ops; 8451 netdev->dcbnl_ops = &dcbnl_ops;
8344#endif 8452#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 5fd4b5271f9a..79c00f57d3e7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -261,18 +261,9 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
261 struct ixgbe_adapter *adapter = 261 struct ixgbe_adapter *adapter =
262 container_of(ptp, struct ixgbe_adapter, ptp_caps); 262 container_of(ptp, struct ixgbe_adapter, ptp_caps);
263 unsigned long flags; 263 unsigned long flags;
264 u64 now;
265 264
266 spin_lock_irqsave(&adapter->tmreg_lock, flags); 265 spin_lock_irqsave(&adapter->tmreg_lock, flags);
267 266 timecounter_adjtime(&adapter->tc, delta);
268 now = timecounter_read(&adapter->tc);
269 now += delta;
270
271 /* reset the timecounter */
272 timecounter_init(&adapter->tc,
273 &adapter->cc,
274 now);
275
276 spin_unlock_irqrestore(&adapter->tmreg_lock, flags); 267 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
277 268
278 ixgbe_ptp_setup_sdp(adapter); 269 ixgbe_ptp_setup_sdp(adapter);
@@ -802,7 +793,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
802 793
803 memset(&adapter->cc, 0, sizeof(adapter->cc)); 794 memset(&adapter->cc, 0, sizeof(adapter->cc));
804 adapter->cc.read = ixgbe_ptp_read; 795 adapter->cc.read = ixgbe_ptp_read;
805 adapter->cc.mask = CLOCKSOURCE_MASK(64); 796 adapter->cc.mask = CYCLECOUNTER_MASK(64);
806 adapter->cc.shift = shift; 797 adapter->cc.shift = shift;
807 adapter->cc.mult = 1; 798 adapter->cc.mult = 1;
808 799
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index c76ba90ecc6e..7f37fe7269a7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -101,9 +101,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
101 adapter->dcb_cfg.num_tcs.pfc_tcs = 1; 101 adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
102 } 102 }
103 103
104 /* We do not support RSS w/ SR-IOV */
105 adapter->ring_feature[RING_F_RSS].limit = 1;
106
107 /* Disable RSC when in SR-IOV mode */ 104 /* Disable RSC when in SR-IOV mode */
108 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | 105 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
109 IXGBE_FLAG2_RSC_ENABLED); 106 IXGBE_FLAG2_RSC_ENABLED);
@@ -1097,14 +1094,12 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
1097 u16 vlan, u8 qos) 1094 u16 vlan, u8 qos)
1098{ 1095{
1099 struct ixgbe_hw *hw = &adapter->hw; 1096 struct ixgbe_hw *hw = &adapter->hw;
1100 int err = 0; 1097 int err;
1101 1098
1102 if (adapter->vfinfo[vf].pf_vlan) 1099 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
1103 err = ixgbe_set_vf_vlan(adapter, false,
1104 adapter->vfinfo[vf].pf_vlan,
1105 vf);
1106 if (err) 1100 if (err)
1107 goto out; 1101 goto out;
1102
1108 ixgbe_set_vmvir(adapter, vlan, qos, vf); 1103 ixgbe_set_vmvir(adapter, vlan, qos, vf);
1109 ixgbe_set_vmolr(hw, vf, false); 1104 ixgbe_set_vmolr(hw, vf, false);
1110 if (adapter->vfinfo[vf].spoofchk_enabled) 1105 if (adapter->vfinfo[vf].spoofchk_enabled)
@@ -1143,6 +1138,11 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
1143 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); 1138 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
1144 if (adapter->vfinfo[vf].vlan_count) 1139 if (adapter->vfinfo[vf].vlan_count)
1145 adapter->vfinfo[vf].vlan_count--; 1140 adapter->vfinfo[vf].vlan_count--;
1141
1142 /* disable hide VLAN on X550 */
1143 if (hw->mac.type >= ixgbe_mac_X550)
1144 ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);
1145
1146 adapter->vfinfo[vf].pf_vlan = 0; 1146 adapter->vfinfo[vf].pf_vlan = 0;
1147 adapter->vfinfo[vf].pf_qos = 0; 1147 adapter->vfinfo[vf].pf_qos = 0;
1148 1148
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index d101b25dc4b6..fc5ecee56ca8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -378,6 +378,8 @@ struct ixgbe_thermal_sensor_data {
378#define IXGBE_SPOOF_MACAS_MASK 0xFF 378#define IXGBE_SPOOF_MACAS_MASK 0xFF
379#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 379#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
380#define IXGBE_SPOOF_VLANAS_SHIFT 8 380#define IXGBE_SPOOF_VLANAS_SHIFT 8
381#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000
382#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16
381#define IXGBE_PFVFSPOOF_REG_COUNT 8 383#define IXGBE_PFVFSPOOF_REG_COUNT 8
382 384
383#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ 385#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
@@ -399,6 +401,7 @@ struct ixgbe_thermal_sensor_data {
399 401
400#define IXGBE_WUPL 0x05900 402#define IXGBE_WUPL 0x05900
401#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ 403#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
404#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */
402#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ 405#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
403#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host 406#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host
404 * Filter Table */ 407 * Filter Table */
@@ -1540,6 +1543,7 @@ enum {
1540#define IXGBE_MAX_ETQF_FILTERS 8 1543#define IXGBE_MAX_ETQF_FILTERS 8
1541#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ 1544#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */
1542#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ 1545#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */
1546#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */
1543#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ 1547#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
1544#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ 1548#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
1545#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ 1549#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
@@ -1565,6 +1569,9 @@ enum {
1565#define IXGBE_ETQF_FILTER_FCOE 2 1569#define IXGBE_ETQF_FILTER_FCOE 2
1566#define IXGBE_ETQF_FILTER_1588 3 1570#define IXGBE_ETQF_FILTER_1588 3
1567#define IXGBE_ETQF_FILTER_FIP 4 1571#define IXGBE_ETQF_FILTER_FIP 4
1572#define IXGBE_ETQF_FILTER_LLDP 5
1573#define IXGBE_ETQF_FILTER_LACP 6
1574
1568/* VLAN Control Bit Masks */ 1575/* VLAN Control Bit Masks */
1569#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ 1576#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
1570#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ 1577#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
@@ -2122,6 +2129,7 @@ enum {
2122#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ 2129#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
2123#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ 2130#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
2124#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ 2131#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
2132#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */
2125#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ 2133#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
2126#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ 2134#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
2127#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ 2135#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
@@ -2139,6 +2147,7 @@ enum {
2139#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ 2147#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
2140#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ 2148#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */
2141#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ 2149#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
2150#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */
2142#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */ 2151#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */
2143#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ 2152#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */
2144#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ 2153#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */
@@ -2227,6 +2236,8 @@ enum {
2227#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ 2236#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
2228#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ 2237#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
2229#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ 2238#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
2239#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */
2240#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */
2230#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ 2241#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
2231#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ 2242#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
2232#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ 2243#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
@@ -3056,6 +3067,7 @@ struct ixgbe_mac_operations {
3056 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); 3067 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
3057 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); 3068 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
3058 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); 3069 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
3070 void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
3059 3071
3060 /* DMA Coalescing */ 3072 /* DMA Coalescing */
3061 s32 (*dmac_config)(struct ixgbe_hw *hw); 3073 s32 (*dmac_config)(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index ba54ff07b438..49395420c9b3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -55,9 +55,6 @@ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
55{ 55{
56 struct ixgbe_mac_info *mac = &hw->mac; 56 struct ixgbe_mac_info *mac = &hw->mac;
57 57
58 /* Call PHY identify routine to get the phy type */
59 ixgbe_identify_phy_generic(hw);
60
61 mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; 58 mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
62 mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; 59 mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
63 mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; 60 mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index ffdd1231f419..50bf81908dd6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -80,7 +80,7 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
80 * Initializes the EEPROM parameters ixgbe_eeprom_info within the 80 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
81 * ixgbe_hw struct in order to set up EEPROM access. 81 * ixgbe_hw struct in order to set up EEPROM access.
82 **/ 82 **/
83s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) 83static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
84{ 84{
85 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 85 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
86 u32 eec; 86 u32 eec;
@@ -110,8 +110,8 @@ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
110 * @device_type: 3 bit device type 110 * @device_type: 3 bit device type
111 * @phy_data: Pointer to read data from the register 111 * @phy_data: Pointer to read data from the register
112 **/ 112 **/
113s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, 113static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
114 u32 device_type, u32 *data) 114 u32 device_type, u32 *data)
115{ 115{
116 u32 i, command, error; 116 u32 i, command, error;
117 117
@@ -158,7 +158,8 @@ s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
158 * 158 *
159 * Reads a 16 bit word from the EEPROM using the hostif. 159 * Reads a 16 bit word from the EEPROM using the hostif.
160 **/ 160 **/
161s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) 161static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
162 u16 *data)
162{ 163{
163 s32 status; 164 s32 status;
164 struct ixgbe_hic_read_shadow_ram buffer; 165 struct ixgbe_hic_read_shadow_ram buffer;
@@ -193,8 +194,8 @@ s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
193 * 194 *
194 * Reads a 16 bit word(s) from the EEPROM using the hostif. 195 * Reads a 16 bit word(s) from the EEPROM using the hostif.
195 **/ 196 **/
196s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, 197static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
197 u16 offset, u16 words, u16 *data) 198 u16 offset, u16 words, u16 *data)
198{ 199{
199 struct ixgbe_hic_read_shadow_ram buffer; 200 struct ixgbe_hic_read_shadow_ram buffer;
200 u32 current_word = 0; 201 u32 current_word = 0;
@@ -331,7 +332,8 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
331 * 332 *
332 * Returns a negative error code on error, or the 16-bit checksum 333 * Returns a negative error code on error, or the 16-bit checksum
333 **/ 334 **/
334s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size) 335static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
336 u32 buffer_size)
335{ 337{
336 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; 338 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
337 u16 *local_buffer; 339 u16 *local_buffer;
@@ -407,7 +409,7 @@ s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
407 * 409 *
408 * Returns a negative error code on error, or the 16-bit checksum 410 * Returns a negative error code on error, or the 16-bit checksum
409 **/ 411 **/
410s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) 412static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
411{ 413{
412 return ixgbe_calc_checksum_X550(hw, NULL, 0); 414 return ixgbe_calc_checksum_X550(hw, NULL, 0);
413} 415}
@@ -419,7 +421,7 @@ s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
419 * 421 *
420 * Reads a 16 bit word from the EEPROM using the hostif. 422 * Reads a 16 bit word from the EEPROM using the hostif.
421 **/ 423 **/
422s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) 424static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
423{ 425{
424 s32 status = 0; 426 s32 status = 0;
425 427
@@ -440,7 +442,8 @@ s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
440 * Performs checksum calculation and validates the EEPROM checksum. If the 442 * Performs checksum calculation and validates the EEPROM checksum. If the
441 * caller does not need checksum_val, the value can be NULL. 443 * caller does not need checksum_val, the value can be NULL.
442 **/ 444 **/
443s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val) 445static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
446 u16 *checksum_val)
444{ 447{
445 s32 status; 448 s32 status;
446 u16 checksum; 449 u16 checksum;
@@ -489,7 +492,8 @@ s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
489 * 492 *
490 * Write a 16 bit word to the EEPROM using the hostif. 493 * Write a 16 bit word to the EEPROM using the hostif.
491 **/ 494 **/
492s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data) 495static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
496 u16 data)
493{ 497{
494 s32 status; 498 s32 status;
495 struct ixgbe_hic_write_shadow_ram buffer; 499 struct ixgbe_hic_write_shadow_ram buffer;
@@ -517,7 +521,7 @@ s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
517 * 521 *
518 * Write a 16 bit word to the EEPROM using the hostif. 522 * Write a 16 bit word to the EEPROM using the hostif.
519 **/ 523 **/
520s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data) 524static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
521{ 525{
522 s32 status = 0; 526 s32 status = 0;
523 527
@@ -537,7 +541,7 @@ s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
537 * 541 *
538 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. 542 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
539 **/ 543 **/
540s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) 544static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
541{ 545{
542 s32 status = 0; 546 s32 status = 0;
543 union ixgbe_hic_hdr2 buffer; 547 union ixgbe_hic_hdr2 buffer;
@@ -560,7 +564,7 @@ s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
560 * checksum and updates the EEPROM and instructs the hardware to update 564 * checksum and updates the EEPROM and instructs the hardware to update
561 * the flash. 565 * the flash.
562 **/ 566 **/
563s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) 567static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
564{ 568{
565 s32 status; 569 s32 status;
566 u16 checksum = 0; 570 u16 checksum = 0;
@@ -600,8 +604,9 @@ s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
600 * 604 *
601 * Write a 16 bit word(s) to the EEPROM using the hostif. 605 * Write a 16 bit word(s) to the EEPROM using the hostif.
602 **/ 606 **/
603s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, 607static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
604 u16 offset, u16 words, u16 *data) 608 u16 offset, u16 words,
609 u16 *data)
605{ 610{
606 s32 status = 0; 611 s32 status = 0;
607 u32 i = 0; 612 u32 i = 0;
@@ -630,7 +635,7 @@ s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
630/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers 635/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
631 * @hw: pointer to hardware structure 636 * @hw: pointer to hardware structure
632 **/ 637 **/
633void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) 638static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
634{ 639{
635 struct ixgbe_mac_info *mac = &hw->mac; 640 struct ixgbe_mac_info *mac = &hw->mac;
636 641
@@ -647,7 +652,7 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
647/** ixgbe_setup_sfp_modules_X550em - Setup SFP module 652/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
648 * @hw: pointer to hardware structure 653 * @hw: pointer to hardware structure
649 */ 654 */
650s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) 655static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
651{ 656{
652 bool setup_linear; 657 bool setup_linear;
653 u16 reg_slice, edc_mode; 658 u16 reg_slice, edc_mode;
@@ -703,9 +708,9 @@ s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
703 * @speed: pointer to link speed 708 * @speed: pointer to link speed
704 * @autoneg: true when autoneg or autotry is enabled 709 * @autoneg: true when autoneg or autotry is enabled
705 **/ 710 **/
706s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, 711static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
707 ixgbe_link_speed *speed, 712 ixgbe_link_speed *speed,
708 bool *autoneg) 713 bool *autoneg)
709{ 714{
710 /* SFP */ 715 /* SFP */
711 if (hw->phy.media_type == ixgbe_media_type_fiber) { 716 if (hw->phy.media_type == ixgbe_media_type_fiber) {
@@ -740,8 +745,8 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
740 * @device_type: 3 bit device type 745 * @device_type: 3 bit device type
741 * @data: Data to write to the register 746 * @data: Data to write to the register
742 **/ 747 **/
743s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, 748static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
744 u32 device_type, u32 data) 749 u32 device_type, u32 data)
745{ 750{
746 u32 i, command, error; 751 u32 i, command, error;
747 752
@@ -904,7 +909,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
904 * 909 *
905 * Configures the integrated KX4 PHY. 910 * Configures the integrated KX4 PHY.
906 **/ 911 **/
907s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) 912static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
908{ 913{
909 s32 status; 914 s32 status;
910 u32 reg_val; 915 u32 reg_val;
@@ -942,7 +947,7 @@ s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
942 * 947 *
943 * Configures the integrated KR PHY. 948 * Configures the integrated KR PHY.
944 **/ 949 **/
945s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) 950static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
946{ 951{
947 s32 status; 952 s32 status;
948 u32 reg_val; 953 u32 reg_val;
@@ -987,7 +992,7 @@ s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
987 * A return of a non-zero value indicates an error, and the base driver should 992 * A return of a non-zero value indicates an error, and the base driver should
988 * not report link up. 993 * not report link up.
989 **/ 994 **/
990s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw) 995static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
991{ 996{
992 u32 status; 997 u32 status;
993 u16 lasi, autoneg_status, speed; 998 u16 lasi, autoneg_status, speed;
@@ -1049,7 +1054,7 @@ s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
1049 * set during init_shared_code because the PHY/SFP type was 1054 * set during init_shared_code because the PHY/SFP type was
1050 * not known. Perform the SFP init if necessary. 1055 * not known. Perform the SFP init if necessary.
1051 **/ 1056 **/
1052s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) 1057static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
1053{ 1058{
1054 struct ixgbe_phy_info *phy = &hw->phy; 1059 struct ixgbe_phy_info *phy = &hw->phy;
1055 s32 ret_val; 1060 s32 ret_val;
@@ -1102,7 +1107,7 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
1102 * Returns the media type (fiber, copper, backplane) 1107 * Returns the media type (fiber, copper, backplane)
1103 * 1108 *
1104 */ 1109 */
1105enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) 1110static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1106{ 1111{
1107 enum ixgbe_media_type media_type; 1112 enum ixgbe_media_type media_type;
1108 1113
@@ -1129,7 +1134,7 @@ enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
1129/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. 1134/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
1130 ** @hw: pointer to hardware structure 1135 ** @hw: pointer to hardware structure
1131 **/ 1136 **/
1132s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) 1137static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
1133{ 1138{
1134 u32 status; 1139 u32 status;
1135 u16 reg; 1140 u16 reg;
@@ -1202,7 +1207,7 @@ s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
1202 ** and clears all interrupts, perform a PHY reset, and perform a link (MAC) 1207 ** and clears all interrupts, perform a PHY reset, and perform a link (MAC)
1203 ** reset. 1208 ** reset.
1204 **/ 1209 **/
1205s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) 1210static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
1206{ 1211{
1207 ixgbe_link_speed link_speed; 1212 ixgbe_link_speed link_speed;
1208 s32 status; 1213 s32 status;
@@ -1295,6 +1300,28 @@ mac_reset_top:
1295 return status; 1300 return status;
1296} 1301}
1297 1302
1303/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype
1304 * anti-spoofing
1305 * @hw: pointer to hardware structure
1306 * @enable: enable or disable switch for Ethertype anti-spoofing
1307 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
1308 **/
1309void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable,
1310 int vf)
1311{
1312 int vf_target_reg = vf >> 3;
1313 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
1314 u32 pfvfspoof;
1315
1316 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1317 if (enable)
1318 pfvfspoof |= (1 << vf_target_shift);
1319 else
1320 pfvfspoof &= ~(1 << vf_target_shift);
1321
1322 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
1323}
1324
1298#define X550_COMMON_MAC \ 1325#define X550_COMMON_MAC \
1299 .init_hw = &ixgbe_init_hw_generic, \ 1326 .init_hw = &ixgbe_init_hw_generic, \
1300 .start_hw = &ixgbe_start_hw_X540, \ 1327 .start_hw = &ixgbe_start_hw_X540, \
@@ -1329,6 +1356,8 @@ mac_reset_top:
1329 .init_uta_tables = &ixgbe_init_uta_tables_generic, \ 1356 .init_uta_tables = &ixgbe_init_uta_tables_generic, \
1330 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ 1357 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
1331 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ 1358 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
1359 .set_ethertype_anti_spoofing = \
1360 &ixgbe_set_ethertype_anti_spoofing_X550, \
1332 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \ 1361 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \
1333 .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \ 1362 .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \
1334 .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \ 1363 .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
@@ -1345,7 +1374,6 @@ static struct ixgbe_mac_operations mac_ops_X550 = {
1345 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, 1374 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
1346 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, 1375 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
1347 .setup_link = &ixgbe_setup_mac_link_X540, 1376 .setup_link = &ixgbe_setup_mac_link_X540,
1348 .set_rxpba = &ixgbe_set_rxpba_generic,
1349 .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, 1377 .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
1350 .setup_sfp = NULL, 1378 .setup_sfp = NULL,
1351}; 1379};
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 8c44ab25f3fa..3a9b356dff01 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -43,6 +43,13 @@
43#define BP_EXTENDED_STATS 43#define BP_EXTENDED_STATS
44#endif 44#endif
45 45
46#define IXGBE_MAX_TXD_PWR 14
47#define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR)
48
49/* Tx Descriptors needed, worst case */
50#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
51#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
52
46/* wrapper around a pointer to a socket buffer, 53/* wrapper around a pointer to a socket buffer,
47 * so a DMA handle can be stored along with the buffer */ 54 * so a DMA handle can be stored along with the buffer */
48struct ixgbevf_tx_buffer { 55struct ixgbevf_tx_buffer {
@@ -85,6 +92,18 @@ struct ixgbevf_rx_queue_stats {
85 u64 csum_err; 92 u64 csum_err;
86}; 93};
87 94
95enum ixgbevf_ring_state_t {
96 __IXGBEVF_TX_DETECT_HANG,
97 __IXGBEVF_HANG_CHECK_ARMED,
98};
99
100#define check_for_tx_hang(ring) \
101 test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
102#define set_check_for_tx_hang(ring) \
103 set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
104#define clear_check_for_tx_hang(ring) \
105 clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
106
88struct ixgbevf_ring { 107struct ixgbevf_ring {
89 struct ixgbevf_ring *next; 108 struct ixgbevf_ring *next;
90 struct net_device *netdev; 109 struct net_device *netdev;
@@ -101,7 +120,7 @@ struct ixgbevf_ring {
101 struct ixgbevf_tx_buffer *tx_buffer_info; 120 struct ixgbevf_tx_buffer *tx_buffer_info;
102 struct ixgbevf_rx_buffer *rx_buffer_info; 121 struct ixgbevf_rx_buffer *rx_buffer_info;
103 }; 122 };
104 123 unsigned long state;
105 struct ixgbevf_stats stats; 124 struct ixgbevf_stats stats;
106 struct u64_stats_sync syncp; 125 struct u64_stats_sync syncp;
107 union { 126 union {
@@ -124,6 +143,7 @@ struct ixgbevf_ring {
124 143
125#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES 144#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
126#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES 145#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
146#define IXGBEVF_MAX_RSS_QUEUES 2
127 147
128#define IXGBEVF_DEFAULT_TXD 1024 148#define IXGBEVF_DEFAULT_TXD 1024
129#define IXGBEVF_DEFAULT_RXD 512 149#define IXGBEVF_DEFAULT_RXD 512
@@ -347,8 +367,6 @@ struct ixgbevf_adapter {
347 /* this field must be first, see ixgbevf_process_skb_fields */ 367 /* this field must be first, see ixgbevf_process_skb_fields */
348 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 368 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
349 369
350 struct timer_list watchdog_timer;
351 struct work_struct reset_task;
352 struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 370 struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
353 371
354 /* Interrupt Throttle Rate */ 372 /* Interrupt Throttle Rate */
@@ -378,8 +396,7 @@ struct ixgbevf_adapter {
378 * thus the additional *_CAPABLE flags. 396 * thus the additional *_CAPABLE flags.
379 */ 397 */
380 u32 flags; 398 u32 flags;
381#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1) 399#define IXGBEVF_FLAG_RESET_REQUESTED (u32)(1)
382
383#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2) 400#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
384 401
385 struct msix_entry *msix_entries; 402 struct msix_entry *msix_entries;
@@ -415,9 +432,11 @@ struct ixgbevf_adapter {
415 u32 link_speed; 432 u32 link_speed;
416 bool link_up; 433 bool link_up;
417 434
418 spinlock_t mbx_lock; 435 struct timer_list service_timer;
436 struct work_struct service_task;
419 437
420 struct work_struct watchdog_task; 438 spinlock_t mbx_lock;
439 unsigned long last_reset;
421}; 440};
422 441
423enum ixbgevf_state_t { 442enum ixbgevf_state_t {
@@ -426,7 +445,8 @@ enum ixbgevf_state_t {
426 __IXGBEVF_DOWN, 445 __IXGBEVF_DOWN,
427 __IXGBEVF_DISABLED, 446 __IXGBEVF_DISABLED,
428 __IXGBEVF_REMOVING, 447 __IXGBEVF_REMOVING,
429 __IXGBEVF_WORK_INIT, 448 __IXGBEVF_SERVICE_SCHED,
449 __IXGBEVF_SERVICE_INITED,
430}; 450};
431 451
432enum ixgbevf_boards { 452enum ixgbevf_boards {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 38c7a0be8197..4186981e562d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -98,6 +98,23 @@ static int debug = -1;
98module_param(debug, int, 0); 98module_param(debug, int, 0);
99MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 99MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
100 100
101static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
102{
103 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
104 !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
105 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
106 schedule_work(&adapter->service_task);
107}
108
109static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
110{
111 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
112
113 /* flush memory to make sure state is correct before next watchdog */
114 smp_mb__before_atomic();
115 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
116}
117
101/* forward decls */ 118/* forward decls */
102static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter); 119static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
103static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 120static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
@@ -111,8 +128,8 @@ static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
111 return; 128 return;
112 hw->hw_addr = NULL; 129 hw->hw_addr = NULL;
113 dev_err(&adapter->pdev->dev, "Adapter removed\n"); 130 dev_err(&adapter->pdev->dev, "Adapter removed\n");
114 if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state)) 131 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
115 schedule_work(&adapter->watchdog_task); 132 ixgbevf_service_event_schedule(adapter);
116} 133}
117 134
118static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) 135static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
@@ -199,14 +216,72 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
199 /* tx_buffer must be completely set up in the transmit path */ 216 /* tx_buffer must be completely set up in the transmit path */
200} 217}
201 218
202#define IXGBE_MAX_TXD_PWR 14 219static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
203#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 220{
221 return ring->stats.packets;
222}
223
224static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
225{
226 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
227 struct ixgbe_hw *hw = &adapter->hw;
228
229 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
230 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
231
232 if (head != tail)
233 return (head < tail) ?
234 tail - head : (tail + ring->count - head);
235
236 return 0;
237}
238
239static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
240{
241 u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
242 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
243 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
244
245 clear_check_for_tx_hang(tx_ring);
246
247 /* Check for a hung queue, but be thorough. This verifies
248 * that a transmit has been completed since the previous
249 * check AND there is at least one packet pending. The
250 * ARMED bit is set to indicate a potential hang.
251 */
252 if ((tx_done_old == tx_done) && tx_pending) {
253 /* make sure it is true for two checks in a row */
254 return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
255 &tx_ring->state);
256 }
257 /* reset the countdown */
258 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
259
260 /* update completed stats and continue */
261 tx_ring->tx_stats.tx_done_old = tx_done;
262
263 return false;
264}
265
266static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
267{
268 /* Do the reset outside of interrupt context */
269 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
270 adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
271 ixgbevf_service_event_schedule(adapter);
272 }
273}
204 274
205/* Tx Descriptors needed, worst case */ 275/**
206#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) 276 * ixgbevf_tx_timeout - Respond to a Tx Hang
207#define DESC_NEEDED (MAX_SKB_FRAGS + 4) 277 * @netdev: network interface device structure
278 **/
279static void ixgbevf_tx_timeout(struct net_device *netdev)
280{
281 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
208 282
209static void ixgbevf_tx_timeout(struct net_device *netdev); 283 ixgbevf_tx_timeout_reset(adapter);
284}
210 285
211/** 286/**
212 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes 287 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
@@ -311,6 +386,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
311 q_vector->tx.total_bytes += total_bytes; 386 q_vector->tx.total_bytes += total_bytes;
312 q_vector->tx.total_packets += total_packets; 387 q_vector->tx.total_packets += total_packets;
313 388
389 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
390 struct ixgbe_hw *hw = &adapter->hw;
391 union ixgbe_adv_tx_desc *eop_desc;
392
393 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
394
395 pr_err("Detected Tx Unit Hang\n"
396 " Tx Queue <%d>\n"
397 " TDH, TDT <%x>, <%x>\n"
398 " next_to_use <%x>\n"
399 " next_to_clean <%x>\n"
400 "tx_buffer_info[next_to_clean]\n"
401 " next_to_watch <%p>\n"
402 " eop_desc->wb.status <%x>\n"
403 " time_stamp <%lx>\n"
404 " jiffies <%lx>\n",
405 tx_ring->queue_index,
406 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
407 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
408 tx_ring->next_to_use, i,
409 eop_desc, (eop_desc ? eop_desc->wb.status : 0),
410 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
411
412 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
413
414 /* schedule immediate reset if we believe we hung */
415 ixgbevf_tx_timeout_reset(adapter);
416
417 return true;
418 }
419
314#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 420#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
315 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 421 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
316 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { 422 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -1158,9 +1264,7 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1158 1264
1159 hw->mac.get_link_status = 1; 1265 hw->mac.get_link_status = 1;
1160 1266
1161 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && 1267 ixgbevf_service_event_schedule(adapter);
1162 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
1163 mod_timer(&adapter->watchdog_timer, jiffies);
1164 1268
1165 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 1269 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1166 1270
@@ -1479,6 +1583,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1479 txdctl |= (1 << 8) | /* HTHRESH = 1 */ 1583 txdctl |= (1 << 8) | /* HTHRESH = 1 */
1480 32; /* PTHRESH = 32 */ 1584 32; /* PTHRESH = 32 */
1481 1585
1586 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1587
1482 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl); 1588 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1483 1589
1484 /* poll to verify queue is enabled */ 1590 /* poll to verify queue is enabled */
@@ -1584,6 +1690,39 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1584 reg_idx); 1690 reg_idx);
1585} 1691}
1586 1692
1693static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1694{
1695 struct ixgbe_hw *hw = &adapter->hw;
1696 u32 vfmrqc = 0, vfreta = 0;
1697 u32 rss_key[10];
1698 u16 rss_i = adapter->num_rx_queues;
1699 int i, j;
1700
1701 /* Fill out hash function seeds */
1702 netdev_rss_key_fill(rss_key, sizeof(rss_key));
1703 for (i = 0; i < 10; i++)
1704 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1705
1706 /* Fill out redirection table */
1707 for (i = 0, j = 0; i < 64; i++, j++) {
1708 if (j == rss_i)
1709 j = 0;
1710 vfreta = (vfreta << 8) | (j * 0x1);
1711 if ((i & 3) == 3)
1712 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1713 }
1714
1715 /* Perform hash on these packet types */
1716 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1717 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1718 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1719 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1720
1721 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1722
1723 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1724}
1725
1587static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, 1726static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1588 struct ixgbevf_ring *ring) 1727 struct ixgbevf_ring *ring)
1589{ 1728{
@@ -1640,6 +1779,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1640 struct net_device *netdev = adapter->netdev; 1779 struct net_device *netdev = adapter->netdev;
1641 1780
1642 ixgbevf_setup_psrtype(adapter); 1781 ixgbevf_setup_psrtype(adapter);
1782 if (hw->mac.type >= ixgbe_mac_X550_vf)
1783 ixgbevf_setup_vfmrqc(adapter);
1643 1784
1644 /* notify the PF of our intent to use this size of frame */ 1785 /* notify the PF of our intent to use this size of frame */
1645 ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); 1786 ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
@@ -1794,7 +1935,8 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1794 struct ixgbe_hw *hw = &adapter->hw; 1935 struct ixgbe_hw *hw = &adapter->hw;
1795 unsigned int def_q = 0; 1936 unsigned int def_q = 0;
1796 unsigned int num_tcs = 0; 1937 unsigned int num_tcs = 0;
1797 unsigned int num_rx_queues = 1; 1938 unsigned int num_rx_queues = adapter->num_rx_queues;
1939 unsigned int num_tx_queues = adapter->num_tx_queues;
1798 int err; 1940 int err;
1799 1941
1800 spin_lock_bh(&adapter->mbx_lock); 1942 spin_lock_bh(&adapter->mbx_lock);
@@ -1808,6 +1950,9 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1808 return err; 1950 return err;
1809 1951
1810 if (num_tcs > 1) { 1952 if (num_tcs > 1) {
1953 /* we need only one Tx queue */
1954 num_tx_queues = 1;
1955
1811 /* update default Tx ring register index */ 1956 /* update default Tx ring register index */
1812 adapter->tx_ring[0]->reg_idx = def_q; 1957 adapter->tx_ring[0]->reg_idx = def_q;
1813 1958
@@ -1816,7 +1961,8 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1816 } 1961 }
1817 1962
1818 /* if we have a bad config abort request queue reset */ 1963 /* if we have a bad config abort request queue reset */
1819 if (adapter->num_rx_queues != num_rx_queues) { 1964 if ((adapter->num_rx_queues != num_rx_queues) ||
1965 (adapter->num_tx_queues != num_tx_queues)) {
1820 /* force mailbox timeout to prevent further messages */ 1966 /* force mailbox timeout to prevent further messages */
1821 hw->mbx.timeout = 0; 1967 hw->mbx.timeout = 0;
1822 1968
@@ -1917,6 +2063,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1917 clear_bit(__IXGBEVF_DOWN, &adapter->state); 2063 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1918 ixgbevf_napi_enable_all(adapter); 2064 ixgbevf_napi_enable_all(adapter);
1919 2065
2066 /* clear any pending interrupts, may auto mask */
2067 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2068 ixgbevf_irq_enable(adapter);
2069
1920 /* enable transmits */ 2070 /* enable transmits */
1921 netif_tx_start_all_queues(netdev); 2071 netif_tx_start_all_queues(netdev);
1922 2072
@@ -1924,21 +2074,14 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1924 ixgbevf_init_last_counter_stats(adapter); 2074 ixgbevf_init_last_counter_stats(adapter);
1925 2075
1926 hw->mac.get_link_status = 1; 2076 hw->mac.get_link_status = 1;
1927 mod_timer(&adapter->watchdog_timer, jiffies); 2077 mod_timer(&adapter->service_timer, jiffies);
1928} 2078}
1929 2079
1930void ixgbevf_up(struct ixgbevf_adapter *adapter) 2080void ixgbevf_up(struct ixgbevf_adapter *adapter)
1931{ 2081{
1932 struct ixgbe_hw *hw = &adapter->hw;
1933
1934 ixgbevf_configure(adapter); 2082 ixgbevf_configure(adapter);
1935 2083
1936 ixgbevf_up_complete(adapter); 2084 ixgbevf_up_complete(adapter);
1937
1938 /* clear any pending interrupts, may auto mask */
1939 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1940
1941 ixgbevf_irq_enable(adapter);
1942} 2085}
1943 2086
1944/** 2087/**
@@ -2045,22 +2188,19 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
2045 for (i = 0; i < adapter->num_rx_queues; i++) 2188 for (i = 0; i < adapter->num_rx_queues; i++)
2046 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); 2189 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2047 2190
2048 netif_tx_disable(netdev); 2191 usleep_range(10000, 20000);
2049
2050 msleep(10);
2051 2192
2052 netif_tx_stop_all_queues(netdev); 2193 netif_tx_stop_all_queues(netdev);
2053 2194
2195 /* call carrier off first to avoid false dev_watchdog timeouts */
2196 netif_carrier_off(netdev);
2197 netif_tx_disable(netdev);
2198
2054 ixgbevf_irq_disable(adapter); 2199 ixgbevf_irq_disable(adapter);
2055 2200
2056 ixgbevf_napi_disable_all(adapter); 2201 ixgbevf_napi_disable_all(adapter);
2057 2202
2058 del_timer_sync(&adapter->watchdog_timer); 2203 del_timer_sync(&adapter->service_timer);
2059 /* can't call flush scheduled work here because it can deadlock
2060 * if linkwatch_event tries to acquire the rtnl_lock which we are
2061 * holding */
2062 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
2063 msleep(1);
2064 2204
2065 /* disable transmits in the hardware now that interrupts are off */ 2205 /* disable transmits in the hardware now that interrupts are off */
2066 for (i = 0; i < adapter->num_tx_queues; i++) { 2206 for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -2070,8 +2210,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
2070 IXGBE_TXDCTL_SWFLSH); 2210 IXGBE_TXDCTL_SWFLSH);
2071 } 2211 }
2072 2212
2073 netif_carrier_off(netdev);
2074
2075 if (!pci_channel_offline(adapter->pdev)) 2213 if (!pci_channel_offline(adapter->pdev))
2076 ixgbevf_reset(adapter); 2214 ixgbevf_reset(adapter);
2077 2215
@@ -2110,6 +2248,8 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2110 memcpy(netdev->perm_addr, adapter->hw.mac.addr, 2248 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
2111 netdev->addr_len); 2249 netdev->addr_len);
2112 } 2250 }
2251
2252 adapter->last_reset = jiffies;
2113} 2253}
2114 2254
2115static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 2255static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
@@ -2181,8 +2321,19 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2181 return; 2321 return;
2182 2322
2183 /* we need as many queues as traffic classes */ 2323 /* we need as many queues as traffic classes */
2184 if (num_tcs > 1) 2324 if (num_tcs > 1) {
2185 adapter->num_rx_queues = num_tcs; 2325 adapter->num_rx_queues = num_tcs;
2326 } else {
2327 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2328
2329 switch (hw->api_version) {
2330 case ixgbe_mbox_api_11:
2331 adapter->num_rx_queues = rss;
2332 adapter->num_tx_queues = rss;
2333 default:
2334 break;
2335 }
2336 }
2186} 2337}
2187 2338
2188/** 2339/**
@@ -2552,7 +2703,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2552 struct ixgbe_hw *hw = &adapter->hw; 2703 struct ixgbe_hw *hw = &adapter->hw;
2553 int i; 2704 int i;
2554 2705
2555 if (!adapter->link_up) 2706 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2707 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2556 return; 2708 return;
2557 2709
2558 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2710 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
@@ -2576,79 +2728,176 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2576} 2728}
2577 2729
2578/** 2730/**
2579 * ixgbevf_watchdog - Timer Call-back 2731 * ixgbevf_service_timer - Timer Call-back
2580 * @data: pointer to adapter cast into an unsigned long 2732 * @data: pointer to adapter cast into an unsigned long
2581 **/ 2733 **/
2582static void ixgbevf_watchdog(unsigned long data) 2734static void ixgbevf_service_timer(unsigned long data)
2583{ 2735{
2584 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2736 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2737
2738 /* Reset the timer */
2739 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
2740
2741 ixgbevf_service_event_schedule(adapter);
2742}
2743
2744static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
2745{
2746 if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
2747 return;
2748
2749 adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
2750
2751 /* If we're already down or resetting, just bail */
2752 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2753 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2754 return;
2755
2756 adapter->tx_timeout_count++;
2757
2758 ixgbevf_reinit_locked(adapter);
2759}
2760
2761/* ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
2762 * @adapter - pointer to the device adapter structure
2763 *
2764 * This function serves two purposes. First it strobes the interrupt lines
2765 * in order to make certain interrupts are occurring. Secondly it sets the
2766 * bits needed to check for TX hangs. As a result we should immediately
2767 * determine if a hang has occurred.
2768 */
2769static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
2770{
2585 struct ixgbe_hw *hw = &adapter->hw; 2771 struct ixgbe_hw *hw = &adapter->hw;
2586 u32 eics = 0; 2772 u32 eics = 0;
2587 int i; 2773 int i;
2588 2774
2589 /* 2775 /* If we're down or resetting, just bail */
2590 * Do the watchdog outside of interrupt context due to the lovely 2776 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2591 * delays that some of the newer hardware requires 2777 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2592 */ 2778 return;
2593 2779
2594 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 2780 /* Force detection of hung controller */
2595 goto watchdog_short_circuit; 2781 if (netif_carrier_ok(adapter->netdev)) {
2782 for (i = 0; i < adapter->num_tx_queues; i++)
2783 set_check_for_tx_hang(adapter->tx_ring[i]);
2784 }
2596 2785
2597 /* get one bit for every active tx/rx interrupt vector */ 2786 /* get one bit for every active tx/rx interrupt vector */
2598 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2787 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2599 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2788 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2789
2600 if (qv->rx.ring || qv->tx.ring) 2790 if (qv->rx.ring || qv->tx.ring)
2601 eics |= 1 << i; 2791 eics |= 1 << i;
2602 } 2792 }
2603 2793
2794 /* Cause software interrupt to ensure rings are cleaned */
2604 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 2795 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2796}
2605 2797
2606watchdog_short_circuit: 2798/**
2607 schedule_work(&adapter->watchdog_task); 2799 * ixgbevf_watchdog_update_link - update the link status
2800 * @adapter - pointer to the device adapter structure
2801 **/
2802static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
2803{
2804 struct ixgbe_hw *hw = &adapter->hw;
2805 u32 link_speed = adapter->link_speed;
2806 bool link_up = adapter->link_up;
2807 s32 err;
2808
2809 spin_lock_bh(&adapter->mbx_lock);
2810
2811 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2812
2813 spin_unlock_bh(&adapter->mbx_lock);
2814
2815 /* if check for link returns error we will need to reset */
2816 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
2817 adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
2818 link_up = false;
2819 }
2820
2821 adapter->link_up = link_up;
2822 adapter->link_speed = link_speed;
2608} 2823}
2609 2824
2610/** 2825/**
2611 * ixgbevf_tx_timeout - Respond to a Tx Hang 2826 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
2612 * @netdev: network interface device structure 2827 * print link up message
2828 * @adapter - pointer to the device adapter structure
2613 **/ 2829 **/
2614static void ixgbevf_tx_timeout(struct net_device *netdev) 2830static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
2615{ 2831{
2616 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2832 struct net_device *netdev = adapter->netdev;
2617 2833
2618 /* Do the reset outside of interrupt context */ 2834 /* only continue if link was previously down */
2619 schedule_work(&adapter->reset_task); 2835 if (netif_carrier_ok(netdev))
2836 return;
2837
2838 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
2839 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2840 "10 Gbps" :
2841 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
2842 "1 Gbps" :
2843 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
2844 "100 Mbps" :
2845 "unknown speed");
2846
2847 netif_carrier_on(netdev);
2620} 2848}
2621 2849
2622static void ixgbevf_reset_task(struct work_struct *work) 2850/**
2851 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
2852 * print link down message
2853 * @adapter - pointer to the adapter structure
2854 **/
2855static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
2623{ 2856{
2624 struct ixgbevf_adapter *adapter; 2857 struct net_device *netdev = adapter->netdev;
2625 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2626 2858
2627 /* If we're already down or resetting, just bail */ 2859 adapter->link_speed = 0;
2860
2861 /* only continue if link was up previously */
2862 if (!netif_carrier_ok(netdev))
2863 return;
2864
2865 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2866
2867 netif_carrier_off(netdev);
2868}
2869
2870/**
2871 * ixgbevf_watchdog_subtask - worker thread to bring link up
2872 * @work: pointer to work_struct containing our data
2873 **/
2874static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
2875{
2876 /* if interface is down do nothing */
2628 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2877 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2629 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
2630 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2878 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2631 return; 2879 return;
2632 2880
2633 adapter->tx_timeout_count++; 2881 ixgbevf_watchdog_update_link(adapter);
2634 2882
2635 ixgbevf_reinit_locked(adapter); 2883 if (adapter->link_up)
2884 ixgbevf_watchdog_link_is_up(adapter);
2885 else
2886 ixgbevf_watchdog_link_is_down(adapter);
2887
2888 ixgbevf_update_stats(adapter);
2636} 2889}
2637 2890
2638/** 2891/**
2639 * ixgbevf_watchdog_task - worker thread to bring link up 2892 * ixgbevf_service_task - manages and runs subtasks
2640 * @work: pointer to work_struct containing our data 2893 * @work: pointer to work_struct containing our data
2641 **/ 2894 **/
2642static void ixgbevf_watchdog_task(struct work_struct *work) 2895static void ixgbevf_service_task(struct work_struct *work)
2643{ 2896{
2644 struct ixgbevf_adapter *adapter = container_of(work, 2897 struct ixgbevf_adapter *adapter = container_of(work,
2645 struct ixgbevf_adapter, 2898 struct ixgbevf_adapter,
2646 watchdog_task); 2899 service_task);
2647 struct net_device *netdev = adapter->netdev;
2648 struct ixgbe_hw *hw = &adapter->hw; 2900 struct ixgbe_hw *hw = &adapter->hw;
2649 u32 link_speed = adapter->link_speed;
2650 bool link_up = adapter->link_up;
2651 s32 need_reset;
2652 2901
2653 if (IXGBE_REMOVED(hw->hw_addr)) { 2902 if (IXGBE_REMOVED(hw->hw_addr)) {
2654 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { 2903 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
@@ -2658,73 +2907,13 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2658 } 2907 }
2659 return; 2908 return;
2660 } 2909 }
2661 ixgbevf_queue_reset_subtask(adapter);
2662
2663 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2664
2665 /*
2666 * Always check the link on the watchdog because we have
2667 * no LSC interrupt
2668 */
2669 spin_lock_bh(&adapter->mbx_lock);
2670
2671 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2672
2673 spin_unlock_bh(&adapter->mbx_lock);
2674
2675 if (need_reset) {
2676 adapter->link_up = link_up;
2677 adapter->link_speed = link_speed;
2678 netif_carrier_off(netdev);
2679 netif_tx_stop_all_queues(netdev);
2680 schedule_work(&adapter->reset_task);
2681 goto pf_has_reset;
2682 }
2683 adapter->link_up = link_up;
2684 adapter->link_speed = link_speed;
2685
2686 if (link_up) {
2687 if (!netif_carrier_ok(netdev)) {
2688 char *link_speed_string;
2689 switch (link_speed) {
2690 case IXGBE_LINK_SPEED_10GB_FULL:
2691 link_speed_string = "10 Gbps";
2692 break;
2693 case IXGBE_LINK_SPEED_1GB_FULL:
2694 link_speed_string = "1 Gbps";
2695 break;
2696 case IXGBE_LINK_SPEED_100_FULL:
2697 link_speed_string = "100 Mbps";
2698 break;
2699 default:
2700 link_speed_string = "unknown speed";
2701 break;
2702 }
2703 dev_info(&adapter->pdev->dev,
2704 "NIC Link is Up, %s\n", link_speed_string);
2705 netif_carrier_on(netdev);
2706 netif_tx_wake_all_queues(netdev);
2707 }
2708 } else {
2709 adapter->link_up = false;
2710 adapter->link_speed = 0;
2711 if (netif_carrier_ok(netdev)) {
2712 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2713 netif_carrier_off(netdev);
2714 netif_tx_stop_all_queues(netdev);
2715 }
2716 }
2717 2910
2718 ixgbevf_update_stats(adapter); 2911 ixgbevf_queue_reset_subtask(adapter);
2719 2912 ixgbevf_reset_subtask(adapter);
2720pf_has_reset: 2913 ixgbevf_watchdog_subtask(adapter);
2721 /* Reset the timer */ 2914 ixgbevf_check_hang_subtask(adapter);
2722 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
2723 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
2724 mod_timer(&adapter->watchdog_timer,
2725 round_jiffies(jiffies + (2 * HZ)));
2726 2915
2727 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 2916 ixgbevf_service_event_complete(adapter);
2728} 2917}
2729 2918
2730/** 2919/**
@@ -2944,10 +3133,6 @@ static int ixgbevf_open(struct net_device *netdev)
2944 if (!adapter->num_msix_vectors) 3133 if (!adapter->num_msix_vectors)
2945 return -ENOMEM; 3134 return -ENOMEM;
2946 3135
2947 /* disallow open during test */
2948 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2949 return -EBUSY;
2950
2951 if (hw->adapter_stopped) { 3136 if (hw->adapter_stopped) {
2952 ixgbevf_reset(adapter); 3137 ixgbevf_reset(adapter);
2953 /* if adapter is still stopped then PF isn't up and 3138 /* if adapter is still stopped then PF isn't up and
@@ -2960,6 +3145,12 @@ static int ixgbevf_open(struct net_device *netdev)
2960 } 3145 }
2961 } 3146 }
2962 3147
3148 /* disallow open during test */
3149 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3150 return -EBUSY;
3151
3152 netif_carrier_off(netdev);
3153
2963 /* allocate transmit descriptors */ 3154 /* allocate transmit descriptors */
2964 err = ixgbevf_setup_all_tx_resources(adapter); 3155 err = ixgbevf_setup_all_tx_resources(adapter);
2965 if (err) 3156 if (err)
@@ -2979,15 +3170,11 @@ static int ixgbevf_open(struct net_device *netdev)
2979 */ 3170 */
2980 ixgbevf_map_rings_to_vectors(adapter); 3171 ixgbevf_map_rings_to_vectors(adapter);
2981 3172
2982 ixgbevf_up_complete(adapter);
2983
2984 /* clear any pending interrupts, may auto mask */
2985 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2986 err = ixgbevf_request_irq(adapter); 3173 err = ixgbevf_request_irq(adapter);
2987 if (err) 3174 if (err)
2988 goto err_req_irq; 3175 goto err_req_irq;
2989 3176
2990 ixgbevf_irq_enable(adapter); 3177 ixgbevf_up_complete(adapter);
2991 3178
2992 return 0; 3179 return 0;
2993 3180
@@ -3452,8 +3639,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3452 first->bytecount = skb->len; 3639 first->bytecount = skb->len;
3453 first->gso_segs = 1; 3640 first->gso_segs = 1;
3454 3641
3455 if (vlan_tx_tag_present(skb)) { 3642 if (skb_vlan_tag_present(skb)) {
3456 tx_flags |= vlan_tx_tag_get(skb); 3643 tx_flags |= skb_vlan_tag_get(skb);
3457 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3644 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3458 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3645 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3459 } 3646 }
@@ -3822,28 +4009,28 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3822 NETIF_F_HW_VLAN_CTAG_RX | 4009 NETIF_F_HW_VLAN_CTAG_RX |
3823 NETIF_F_HW_VLAN_CTAG_FILTER; 4010 NETIF_F_HW_VLAN_CTAG_FILTER;
3824 4011
3825 netdev->vlan_features |= NETIF_F_TSO; 4012 netdev->vlan_features |= NETIF_F_TSO |
3826 netdev->vlan_features |= NETIF_F_TSO6; 4013 NETIF_F_TSO6 |
3827 netdev->vlan_features |= NETIF_F_IP_CSUM; 4014 NETIF_F_IP_CSUM |
3828 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 4015 NETIF_F_IPV6_CSUM |
3829 netdev->vlan_features |= NETIF_F_SG; 4016 NETIF_F_SG;
3830 4017
3831 if (pci_using_dac) 4018 if (pci_using_dac)
3832 netdev->features |= NETIF_F_HIGHDMA; 4019 netdev->features |= NETIF_F_HIGHDMA;
3833 4020
3834 netdev->priv_flags |= IFF_UNICAST_FLT; 4021 netdev->priv_flags |= IFF_UNICAST_FLT;
3835 4022
3836 init_timer(&adapter->watchdog_timer);
3837 adapter->watchdog_timer.function = ixgbevf_watchdog;
3838 adapter->watchdog_timer.data = (unsigned long)adapter;
3839
3840 if (IXGBE_REMOVED(hw->hw_addr)) { 4023 if (IXGBE_REMOVED(hw->hw_addr)) {
3841 err = -EIO; 4024 err = -EIO;
3842 goto err_sw_init; 4025 goto err_sw_init;
3843 } 4026 }
3844 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 4027
3845 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 4028 setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
3846 set_bit(__IXGBEVF_WORK_INIT, &adapter->state); 4029 (unsigned long)adapter);
4030
4031 INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4032 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4033 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
3847 4034
3848 err = ixgbevf_init_interrupt_scheme(adapter); 4035 err = ixgbevf_init_interrupt_scheme(adapter);
3849 if (err) 4036 if (err)
@@ -3917,11 +4104,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
3917 adapter = netdev_priv(netdev); 4104 adapter = netdev_priv(netdev);
3918 4105
3919 set_bit(__IXGBEVF_REMOVING, &adapter->state); 4106 set_bit(__IXGBEVF_REMOVING, &adapter->state);
3920 4107 cancel_work_sync(&adapter->service_task);
3921 del_timer_sync(&adapter->watchdog_timer);
3922
3923 cancel_work_sync(&adapter->reset_task);
3924 cancel_work_sync(&adapter->watchdog_task);
3925 4108
3926 if (netdev->reg_state == NETREG_REGISTERED) 4109 if (netdev->reg_state == NETREG_REGISTERED)
3927 unregister_netdev(netdev); 4110 unregister_netdev(netdev);
@@ -3955,7 +4138,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3955 struct net_device *netdev = pci_get_drvdata(pdev); 4138 struct net_device *netdev = pci_get_drvdata(pdev);
3956 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 4139 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3957 4140
3958 if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state)) 4141 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
3959 return PCI_ERS_RESULT_DISCONNECT; 4142 return PCI_ERS_RESULT_DISCONNECT;
3960 4143
3961 rtnl_lock(); 4144 rtnl_lock();
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 09dd8f698bea..3e712fd6e695 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -69,6 +69,16 @@
69#define IXGBE_VFGOTC_LSB 0x02020 69#define IXGBE_VFGOTC_LSB 0x02020
70#define IXGBE_VFGOTC_MSB 0x02024 70#define IXGBE_VFGOTC_MSB 0x02024
71#define IXGBE_VFMPRC 0x01034 71#define IXGBE_VFMPRC 0x01034
72#define IXGBE_VFMRQC 0x3000
73#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4))
74#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4))
75
76/* VFMRQC bits */
77#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */
78#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000
79#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000
80#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000
81#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000
72 82
73#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) 83#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
74 84
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 44ce7d88f554..6e9a792097d3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2154,9 +2154,9 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
2154static inline void 2154static inline void
2155jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags) 2155jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
2156{ 2156{
2157 if (vlan_tx_tag_present(skb)) { 2157 if (skb_vlan_tag_present(skb)) {
2158 *flags |= TXFLAG_TAGON; 2158 *flags |= TXFLAG_TAGON;
2159 *vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 2159 *vlan = cpu_to_le16(skb_vlan_tag_get(skb));
2160 } 2160 }
2161} 2161}
2162 2162
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 867a6a3ef81f..d9f4498832a1 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1895,14 +1895,14 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1895 ctrl = 0; 1895 ctrl = 0;
1896 1896
1897 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ 1897 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1898 if (vlan_tx_tag_present(skb)) { 1898 if (skb_vlan_tag_present(skb)) {
1899 if (!le) { 1899 if (!le) {
1900 le = get_tx_le(sky2, &slot); 1900 le = get_tx_le(sky2, &slot);
1901 le->addr = 0; 1901 le->addr = 0;
1902 le->opcode = OP_VLAN|HW_OWNER; 1902 le->opcode = OP_VLAN|HW_OWNER;
1903 } else 1903 } else
1904 le->opcode |= OP_VLAN; 1904 le->opcode |= OP_VLAN;
1905 le->length = cpu_to_be16(vlan_tx_tag_get(skb)); 1905 le->length = cpu_to_be16(skb_vlan_tag_get(skb));
1906 ctrl |= INS_VLAN; 1906 ctrl |= INS_VLAN;
1907 } 1907 }
1908 1908
@@ -2594,7 +2594,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2594 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; 2594 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
2595 prefetch(sky2->rx_ring + sky2->rx_next); 2595 prefetch(sky2->rx_ring + sky2->rx_next);
2596 2596
2597 if (vlan_tx_tag_present(re->skb)) 2597 if (skb_vlan_tag_present(re->skb))
2598 count -= VLAN_HLEN; /* Account for vlan tag */ 2598 count -= VLAN_HLEN; /* Account for vlan tag */
2599 2599
2600 /* This chip has hardware problems that generates bogus status. 2600 /* This chip has hardware problems that generates bogus status.
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 963dd7e6d547..0c51c69f802f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -592,7 +592,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
592 buf->nbufs = 1; 592 buf->nbufs = 1;
593 buf->npages = 1; 593 buf->npages = 1;
594 buf->page_shift = get_order(size) + PAGE_SHIFT; 594 buf->page_shift = get_order(size) + PAGE_SHIFT;
595 buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, 595 buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev,
596 size, &t, gfp); 596 size, &t, gfp);
597 if (!buf->direct.buf) 597 if (!buf->direct.buf)
598 return -ENOMEM; 598 return -ENOMEM;
@@ -619,7 +619,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
619 619
620 for (i = 0; i < buf->nbufs; ++i) { 620 for (i = 0; i < buf->nbufs; ++i) {
621 buf->page_list[i].buf = 621 buf->page_list[i].buf =
622 dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, 622 dma_alloc_coherent(&dev->persist->pdev->dev,
623 PAGE_SIZE,
623 &t, gfp); 624 &t, gfp);
624 if (!buf->page_list[i].buf) 625 if (!buf->page_list[i].buf)
625 goto err_free; 626 goto err_free;
@@ -657,15 +658,17 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
657 int i; 658 int i;
658 659
659 if (buf->nbufs == 1) 660 if (buf->nbufs == 1)
660 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, 661 dma_free_coherent(&dev->persist->pdev->dev, size,
662 buf->direct.buf,
661 buf->direct.map); 663 buf->direct.map);
662 else { 664 else {
663 if (BITS_PER_LONG == 64 && buf->direct.buf) 665 if (BITS_PER_LONG == 64)
664 vunmap(buf->direct.buf); 666 vunmap(buf->direct.buf);
665 667
666 for (i = 0; i < buf->nbufs; ++i) 668 for (i = 0; i < buf->nbufs; ++i)
667 if (buf->page_list[i].buf) 669 if (buf->page_list[i].buf)
668 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 670 dma_free_coherent(&dev->persist->pdev->dev,
671 PAGE_SIZE,
669 buf->page_list[i].buf, 672 buf->page_list[i].buf,
670 buf->page_list[i].map); 673 buf->page_list[i].map);
671 kfree(buf->page_list); 674 kfree(buf->page_list);
@@ -738,7 +741,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp
738 if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) 741 if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
739 goto out; 742 goto out;
740 743
741 pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev), gfp); 744 pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp);
742 if (!pgdir) { 745 if (!pgdir) {
743 ret = -ENOMEM; 746 ret = -ENOMEM;
744 goto out; 747 goto out;
@@ -775,7 +778,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
775 set_bit(i, db->u.pgdir->bits[o]); 778 set_bit(i, db->u.pgdir->bits[o]);
776 779
777 if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) { 780 if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
778 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, 781 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
779 db->u.pgdir->db_page, db->u.pgdir->db_dma); 782 db->u.pgdir->db_page, db->u.pgdir->db_dma);
780 list_del(&db->u.pgdir->list); 783 list_del(&db->u.pgdir->list);
781 kfree(db->u.pgdir); 784 kfree(db->u.pgdir);
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 9c656fe4983d..715de8affcc9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -40,16 +40,177 @@ enum {
40 MLX4_CATAS_POLL_INTERVAL = 5 * HZ, 40 MLX4_CATAS_POLL_INTERVAL = 5 * HZ,
41}; 41};
42 42
43static DEFINE_SPINLOCK(catas_lock);
44 43
45static LIST_HEAD(catas_list);
46static struct work_struct catas_work;
47 44
48static int internal_err_reset = 1; 45int mlx4_internal_err_reset = 1;
49module_param(internal_err_reset, int, 0644); 46module_param_named(internal_err_reset, mlx4_internal_err_reset, int, 0644);
50MODULE_PARM_DESC(internal_err_reset, 47MODULE_PARM_DESC(internal_err_reset,
51 "Reset device on internal errors if non-zero" 48 "Reset device on internal errors if non-zero (default 1)");
52 " (default 1, in SRIOV mode default is 0)"); 49
50static int read_vendor_id(struct mlx4_dev *dev)
51{
52 u16 vendor_id = 0;
53 int ret;
54
55 ret = pci_read_config_word(dev->persist->pdev, 0, &vendor_id);
56 if (ret) {
57 mlx4_err(dev, "Failed to read vendor ID, ret=%d\n", ret);
58 return ret;
59 }
60
61 if (vendor_id == 0xffff) {
62 mlx4_err(dev, "PCI can't be accessed to read vendor id\n");
63 return -EINVAL;
64 }
65
66 return 0;
67}
68
69static int mlx4_reset_master(struct mlx4_dev *dev)
70{
71 int err = 0;
72
73 if (mlx4_is_master(dev))
74 mlx4_report_internal_err_comm_event(dev);
75
76 if (!pci_channel_offline(dev->persist->pdev)) {
77 err = read_vendor_id(dev);
78 /* If PCI can't be accessed to read vendor ID we assume that its
79 * link was disabled and chip was already reset.
80 */
81 if (err)
82 return 0;
83
84 err = mlx4_reset(dev);
85 if (err)
86 mlx4_err(dev, "Fail to reset HCA\n");
87 }
88
89 return err;
90}
91
92static int mlx4_reset_slave(struct mlx4_dev *dev)
93{
94#define COM_CHAN_RST_REQ_OFFSET 0x10
95#define COM_CHAN_RST_ACK_OFFSET 0x08
96
97 u32 comm_flags;
98 u32 rst_req;
99 u32 rst_ack;
100 unsigned long end;
101 struct mlx4_priv *priv = mlx4_priv(dev);
102
103 if (pci_channel_offline(dev->persist->pdev))
104 return 0;
105
106 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
107 MLX4_COMM_CHAN_FLAGS));
108 if (comm_flags == 0xffffffff) {
109 mlx4_err(dev, "VF reset is not needed\n");
110 return 0;
111 }
112
113 if (!(dev->caps.vf_caps & MLX4_VF_CAP_FLAG_RESET)) {
114 mlx4_err(dev, "VF reset is not supported\n");
115 return -EOPNOTSUPP;
116 }
117
118 rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >>
119 COM_CHAN_RST_REQ_OFFSET;
120 rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >>
121 COM_CHAN_RST_ACK_OFFSET;
122 if (rst_req != rst_ack) {
123 mlx4_err(dev, "Communication channel isn't sync, fail to send reset\n");
124 return -EIO;
125 }
126
127 rst_req ^= 1;
128 mlx4_warn(dev, "VF is sending reset request to Firmware\n");
129 comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET;
130 __raw_writel((__force u32)cpu_to_be32(comm_flags),
131 (__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS);
132 /* Make sure that our comm channel write doesn't
133 * get mixed in with writes from another CPU.
134 */
135 mmiowb();
136
137 end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies;
138 while (time_before(jiffies, end)) {
139 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
140 MLX4_COMM_CHAN_FLAGS));
141 rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >>
142 COM_CHAN_RST_ACK_OFFSET;
143
144 /* Reading rst_req again since the communication channel can
145 * be reset at any time by the PF and all its bits will be
146 * set to zero.
147 */
148 rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >>
149 COM_CHAN_RST_REQ_OFFSET;
150
151 if (rst_ack == rst_req) {
152 mlx4_warn(dev, "VF Reset succeed\n");
153 return 0;
154 }
155 cond_resched();
156 }
157 mlx4_err(dev, "Fail to send reset over the communication channel\n");
158 return -ETIMEDOUT;
159}
160
161static int mlx4_comm_internal_err(u32 slave_read)
162{
163 return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
164 (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
165}
166
167void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
168{
169 int err;
170 struct mlx4_dev *dev;
171
172 if (!mlx4_internal_err_reset)
173 return;
174
175 mutex_lock(&persist->device_state_mutex);
176 if (persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
177 goto out;
178
179 dev = persist->dev;
180 mlx4_err(dev, "device is going to be reset\n");
181 if (mlx4_is_slave(dev))
182 err = mlx4_reset_slave(dev);
183 else
184 err = mlx4_reset_master(dev);
185 BUG_ON(err != 0);
186
187 dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR;
188 mlx4_err(dev, "device was reset successfully\n");
189 mutex_unlock(&persist->device_state_mutex);
190
191 /* At that step HW was already reset, now notify clients */
192 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
193 mlx4_cmd_wake_completions(dev);
194 return;
195
196out:
197 mutex_unlock(&persist->device_state_mutex);
198}
199
200static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
201{
202 int err = 0;
203
204 mlx4_enter_error_state(persist);
205 mutex_lock(&persist->interface_state_mutex);
206 if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
207 !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
208 err = mlx4_restart_one(persist->pdev);
209 mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n",
210 err);
211 }
212 mutex_unlock(&persist->interface_state_mutex);
213}
53 214
54static void dump_err_buf(struct mlx4_dev *dev) 215static void dump_err_buf(struct mlx4_dev *dev)
55{ 216{
@@ -67,58 +228,40 @@ static void poll_catas(unsigned long dev_ptr)
67{ 228{
68 struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr; 229 struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
69 struct mlx4_priv *priv = mlx4_priv(dev); 230 struct mlx4_priv *priv = mlx4_priv(dev);
231 u32 slave_read;
70 232
71 if (readl(priv->catas_err.map)) { 233 if (mlx4_is_slave(dev)) {
72 /* If the device is off-line, we cannot try to recover it */ 234 slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
73 if (pci_channel_offline(dev->pdev)) 235 if (mlx4_comm_internal_err(slave_read)) {
74 mod_timer(&priv->catas_err.timer, 236 mlx4_warn(dev, "Internal error detected on the communication channel\n");
75 round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); 237 goto internal_err;
76 else {
77 dump_err_buf(dev);
78 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
79
80 if (internal_err_reset) {
81 spin_lock(&catas_lock);
82 list_add(&priv->catas_err.list, &catas_list);
83 spin_unlock(&catas_lock);
84
85 queue_work(mlx4_wq, &catas_work);
86 }
87 } 238 }
88 } else 239 } else if (readl(priv->catas_err.map)) {
89 mod_timer(&priv->catas_err.timer, 240 dump_err_buf(dev);
90 round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); 241 goto internal_err;
242 }
243
244 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
245 mlx4_warn(dev, "Internal error mark was detected on device\n");
246 goto internal_err;
247 }
248
249 mod_timer(&priv->catas_err.timer,
250 round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
251 return;
252
253internal_err:
254 if (mlx4_internal_err_reset)
255 queue_work(dev->persist->catas_wq, &dev->persist->catas_work);
91} 256}
92 257
93static void catas_reset(struct work_struct *work) 258static void catas_reset(struct work_struct *work)
94{ 259{
95 struct mlx4_priv *priv, *tmppriv; 260 struct mlx4_dev_persistent *persist =
96 struct mlx4_dev *dev; 261 container_of(work, struct mlx4_dev_persistent,
262 catas_work);
97 263
98 LIST_HEAD(tlist); 264 mlx4_handle_error_state(persist);
99 int ret;
100
101 spin_lock_irq(&catas_lock);
102 list_splice_init(&catas_list, &tlist);
103 spin_unlock_irq(&catas_lock);
104
105 list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
106 struct pci_dev *pdev = priv->dev.pdev;
107
108 /* If the device is off-line, we cannot reset it */
109 if (pci_channel_offline(pdev))
110 continue;
111
112 ret = mlx4_restart_one(priv->dev.pdev);
113 /* 'priv' now is not valid */
114 if (ret)
115 pr_err("mlx4 %s: Reset failed (%d)\n",
116 pci_name(pdev), ret);
117 else {
118 dev = pci_get_drvdata(pdev);
119 mlx4_dbg(dev, "Reset succeeded\n");
120 }
121 }
122} 265}
123 266
124void mlx4_start_catas_poll(struct mlx4_dev *dev) 267void mlx4_start_catas_poll(struct mlx4_dev *dev)
@@ -126,22 +269,21 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
126 struct mlx4_priv *priv = mlx4_priv(dev); 269 struct mlx4_priv *priv = mlx4_priv(dev);
127 phys_addr_t addr; 270 phys_addr_t addr;
128 271
129 /*If we are in SRIOV the default of the module param must be 0*/
130 if (mlx4_is_mfunc(dev))
131 internal_err_reset = 0;
132
133 INIT_LIST_HEAD(&priv->catas_err.list); 272 INIT_LIST_HEAD(&priv->catas_err.list);
134 init_timer(&priv->catas_err.timer); 273 init_timer(&priv->catas_err.timer);
135 priv->catas_err.map = NULL; 274 priv->catas_err.map = NULL;
136 275
137 addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) + 276 if (!mlx4_is_slave(dev)) {
138 priv->fw.catas_offset; 277 addr = pci_resource_start(dev->persist->pdev,
278 priv->fw.catas_bar) +
279 priv->fw.catas_offset;
139 280
140 priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); 281 priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
141 if (!priv->catas_err.map) { 282 if (!priv->catas_err.map) {
142 mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n", 283 mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
143 (unsigned long long) addr); 284 (unsigned long long)addr);
144 return; 285 return;
286 }
145 } 287 }
146 288
147 priv->catas_err.timer.data = (unsigned long) dev; 289 priv->catas_err.timer.data = (unsigned long) dev;
@@ -157,15 +299,29 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev)
157 299
158 del_timer_sync(&priv->catas_err.timer); 300 del_timer_sync(&priv->catas_err.timer);
159 301
160 if (priv->catas_err.map) 302 if (priv->catas_err.map) {
161 iounmap(priv->catas_err.map); 303 iounmap(priv->catas_err.map);
304 priv->catas_err.map = NULL;
305 }
162 306
163 spin_lock_irq(&catas_lock); 307 if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION)
164 list_del(&priv->catas_err.list); 308 flush_workqueue(dev->persist->catas_wq);
165 spin_unlock_irq(&catas_lock);
166} 309}
167 310
168void __init mlx4_catas_init(void) 311int mlx4_catas_init(struct mlx4_dev *dev)
169{ 312{
170 INIT_WORK(&catas_work, catas_reset); 313 INIT_WORK(&dev->persist->catas_work, catas_reset);
314 dev->persist->catas_wq = create_singlethread_workqueue("mlx4_health");
315 if (!dev->persist->catas_wq)
316 return -ENOMEM;
317
318 return 0;
319}
320
321void mlx4_catas_end(struct mlx4_dev *dev)
322{
323 if (dev->persist->catas_wq) {
324 destroy_workqueue(dev->persist->catas_wq);
325 dev->persist->catas_wq = NULL;
326 }
171} 327}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 5c93d1451c44..a681d7c0bb9f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -42,6 +42,7 @@
42#include <linux/mlx4/device.h> 42#include <linux/mlx4/device.h>
43#include <linux/semaphore.h> 43#include <linux/semaphore.h>
44#include <rdma/ib_smi.h> 44#include <rdma/ib_smi.h>
45#include <linux/delay.h>
45 46
46#include <asm/io.h> 47#include <asm/io.h>
47 48
@@ -182,6 +183,72 @@ static u8 mlx4_errno_to_status(int errno)
182 } 183 }
183} 184}
184 185
186static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
187 u8 op_modifier)
188{
189 switch (op) {
190 case MLX4_CMD_UNMAP_ICM:
191 case MLX4_CMD_UNMAP_ICM_AUX:
192 case MLX4_CMD_UNMAP_FA:
193 case MLX4_CMD_2RST_QP:
194 case MLX4_CMD_HW2SW_EQ:
195 case MLX4_CMD_HW2SW_CQ:
196 case MLX4_CMD_HW2SW_SRQ:
197 case MLX4_CMD_HW2SW_MPT:
198 case MLX4_CMD_CLOSE_HCA:
199 case MLX4_QP_FLOW_STEERING_DETACH:
200 case MLX4_CMD_FREE_RES:
201 case MLX4_CMD_CLOSE_PORT:
202 return CMD_STAT_OK;
203
204 case MLX4_CMD_QP_ATTACH:
205 /* On Detach case return success */
206 if (op_modifier == 0)
207 return CMD_STAT_OK;
208 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
209
210 default:
211 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
212 }
213}
214
215static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
216{
217 /* Any error during the closing commands below is considered fatal */
218 if (op == MLX4_CMD_CLOSE_HCA ||
219 op == MLX4_CMD_HW2SW_EQ ||
220 op == MLX4_CMD_HW2SW_CQ ||
221 op == MLX4_CMD_2RST_QP ||
222 op == MLX4_CMD_HW2SW_SRQ ||
223 op == MLX4_CMD_SYNC_TPT ||
224 op == MLX4_CMD_UNMAP_ICM ||
225 op == MLX4_CMD_UNMAP_ICM_AUX ||
226 op == MLX4_CMD_UNMAP_FA)
227 return 1;
228 /* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
229 * CMD_STAT_REG_BOUND.
230 * This status indicates that memory region has memory windows bound to it
231 * which may result from invalid user space usage and is not fatal.
232 */
233 if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
234 return 1;
235 return 0;
236}
237
238static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
239 int err)
240{
241 /* Only if reset flow is really active return code is based on
242 * command, otherwise current error code is returned.
243 */
244 if (mlx4_internal_err_reset) {
245 mlx4_enter_error_state(dev->persist);
246 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
247 }
248
249 return err;
250}
251
185static int comm_pending(struct mlx4_dev *dev) 252static int comm_pending(struct mlx4_dev *dev)
186{ 253{
187 struct mlx4_priv *priv = mlx4_priv(dev); 254 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -190,16 +257,30 @@ static int comm_pending(struct mlx4_dev *dev)
190 return (swab32(status) >> 31) != priv->cmd.comm_toggle; 257 return (swab32(status) >> 31) != priv->cmd.comm_toggle;
191} 258}
192 259
193static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param) 260static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
194{ 261{
195 struct mlx4_priv *priv = mlx4_priv(dev); 262 struct mlx4_priv *priv = mlx4_priv(dev);
196 u32 val; 263 u32 val;
197 264
265 /* To avoid writing to unknown addresses after the device state was
266 * changed to internal error and the function was rest,
267 * check the INTERNAL_ERROR flag which is updated under
268 * device_state_mutex lock.
269 */
270 mutex_lock(&dev->persist->device_state_mutex);
271
272 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
273 mutex_unlock(&dev->persist->device_state_mutex);
274 return -EIO;
275 }
276
198 priv->cmd.comm_toggle ^= 1; 277 priv->cmd.comm_toggle ^= 1;
199 val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31); 278 val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
200 __raw_writel((__force u32) cpu_to_be32(val), 279 __raw_writel((__force u32) cpu_to_be32(val),
201 &priv->mfunc.comm->slave_write); 280 &priv->mfunc.comm->slave_write);
202 mmiowb(); 281 mmiowb();
282 mutex_unlock(&dev->persist->device_state_mutex);
283 return 0;
203} 284}
204 285
205static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param, 286static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
@@ -219,7 +300,13 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
219 300
220 /* Write command */ 301 /* Write command */
221 down(&priv->cmd.poll_sem); 302 down(&priv->cmd.poll_sem);
222 mlx4_comm_cmd_post(dev, cmd, param); 303 if (mlx4_comm_cmd_post(dev, cmd, param)) {
304 /* Only in case the device state is INTERNAL_ERROR,
305 * mlx4_comm_cmd_post returns with an error
306 */
307 err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
308 goto out;
309 }
223 310
224 end = msecs_to_jiffies(timeout) + jiffies; 311 end = msecs_to_jiffies(timeout) + jiffies;
225 while (comm_pending(dev) && time_before(jiffies, end)) 312 while (comm_pending(dev) && time_before(jiffies, end))
@@ -231,18 +318,23 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
231 * is MLX4_DELAY_RESET_SLAVE*/ 318 * is MLX4_DELAY_RESET_SLAVE*/
232 if ((MLX4_COMM_CMD_RESET == cmd)) { 319 if ((MLX4_COMM_CMD_RESET == cmd)) {
233 err = MLX4_DELAY_RESET_SLAVE; 320 err = MLX4_DELAY_RESET_SLAVE;
321 goto out;
234 } else { 322 } else {
235 mlx4_warn(dev, "Communication channel timed out\n"); 323 mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
236 err = -ETIMEDOUT; 324 cmd);
325 err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
237 } 326 }
238 } 327 }
239 328
329 if (err)
330 mlx4_enter_error_state(dev->persist);
331out:
240 up(&priv->cmd.poll_sem); 332 up(&priv->cmd.poll_sem);
241 return err; 333 return err;
242} 334}
243 335
244static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op, 336static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
245 u16 param, unsigned long timeout) 337 u16 param, u16 op, unsigned long timeout)
246{ 338{
247 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; 339 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
248 struct mlx4_cmd_context *context; 340 struct mlx4_cmd_context *context;
@@ -258,34 +350,49 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
258 cmd->free_head = context->next; 350 cmd->free_head = context->next;
259 spin_unlock(&cmd->context_lock); 351 spin_unlock(&cmd->context_lock);
260 352
261 init_completion(&context->done); 353 reinit_completion(&context->done);
262 354
263 mlx4_comm_cmd_post(dev, op, param); 355 if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
356 /* Only in case the device state is INTERNAL_ERROR,
357 * mlx4_comm_cmd_post returns with an error
358 */
359 err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
360 goto out;
361 }
264 362
265 if (!wait_for_completion_timeout(&context->done, 363 if (!wait_for_completion_timeout(&context->done,
266 msecs_to_jiffies(timeout))) { 364 msecs_to_jiffies(timeout))) {
267 mlx4_warn(dev, "communication channel command 0x%x timed out\n", 365 mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
268 op); 366 vhcr_cmd, op);
269 err = -EBUSY; 367 goto out_reset;
270 goto out;
271 } 368 }
272 369
273 err = context->result; 370 err = context->result;
274 if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) { 371 if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
275 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", 372 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
276 op, context->fw_status); 373 vhcr_cmd, context->fw_status);
277 goto out; 374 if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
375 goto out_reset;
278 } 376 }
279 377
280out:
281 /* wait for comm channel ready 378 /* wait for comm channel ready
282 * this is necessary for prevention the race 379 * this is necessary for prevention the race
283 * when switching between event to polling mode 380 * when switching between event to polling mode
381 * Skipping this section in case the device is in FATAL_ERROR state,
382 * In this state, no commands are sent via the comm channel until
383 * the device has returned from reset.
284 */ 384 */
285 end = msecs_to_jiffies(timeout) + jiffies; 385 if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
286 while (comm_pending(dev) && time_before(jiffies, end)) 386 end = msecs_to_jiffies(timeout) + jiffies;
287 cond_resched(); 387 while (comm_pending(dev) && time_before(jiffies, end))
388 cond_resched();
389 }
390 goto out;
288 391
392out_reset:
393 err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
394 mlx4_enter_error_state(dev->persist);
395out:
289 spin_lock(&cmd->context_lock); 396 spin_lock(&cmd->context_lock);
290 context->next = cmd->free_head; 397 context->next = cmd->free_head;
291 cmd->free_head = context - cmd->context; 398 cmd->free_head = context - cmd->context;
@@ -296,10 +403,13 @@ out:
296} 403}
297 404
298int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, 405int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
299 unsigned long timeout) 406 u16 op, unsigned long timeout)
300{ 407{
408 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
409 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
410
301 if (mlx4_priv(dev)->cmd.use_events) 411 if (mlx4_priv(dev)->cmd.use_events)
302 return mlx4_comm_cmd_wait(dev, cmd, param, timeout); 412 return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
303 return mlx4_comm_cmd_poll(dev, cmd, param, timeout); 413 return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
304} 414}
305 415
@@ -307,7 +417,7 @@ static int cmd_pending(struct mlx4_dev *dev)
307{ 417{
308 u32 status; 418 u32 status;
309 419
310 if (pci_channel_offline(dev->pdev)) 420 if (pci_channel_offline(dev->persist->pdev))
311 return -EIO; 421 return -EIO;
312 422
313 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); 423 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -323,17 +433,21 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
323{ 433{
324 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; 434 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
325 u32 __iomem *hcr = cmd->hcr; 435 u32 __iomem *hcr = cmd->hcr;
326 int ret = -EAGAIN; 436 int ret = -EIO;
327 unsigned long end; 437 unsigned long end;
328 438
329 mutex_lock(&cmd->hcr_mutex); 439 mutex_lock(&dev->persist->device_state_mutex);
330 440 /* To avoid writing to unknown addresses after the device state was
331 if (pci_channel_offline(dev->pdev)) { 441 * changed to internal error and the chip was reset,
442 * check the INTERNAL_ERROR flag which is updated under
443 * device_state_mutex lock.
444 */
445 if (pci_channel_offline(dev->persist->pdev) ||
446 (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
332 /* 447 /*
333 * Device is going through error recovery 448 * Device is going through error recovery
334 * and cannot accept commands. 449 * and cannot accept commands.
335 */ 450 */
336 ret = -EIO;
337 goto out; 451 goto out;
338 } 452 }
339 453
@@ -342,12 +456,11 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
342 end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS); 456 end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
343 457
344 while (cmd_pending(dev)) { 458 while (cmd_pending(dev)) {
345 if (pci_channel_offline(dev->pdev)) { 459 if (pci_channel_offline(dev->persist->pdev)) {
346 /* 460 /*
347 * Device is going through error recovery 461 * Device is going through error recovery
348 * and cannot accept commands. 462 * and cannot accept commands.
349 */ 463 */
350 ret = -EIO;
351 goto out; 464 goto out;
352 } 465 }
353 466
@@ -391,7 +504,11 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
391 ret = 0; 504 ret = 0;
392 505
393out: 506out:
394 mutex_unlock(&cmd->hcr_mutex); 507 if (ret)
508 mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
509 op, ret, in_param, in_modifier, op_modifier);
510 mutex_unlock(&dev->persist->device_state_mutex);
511
395 return ret; 512 return ret;
396} 513}
397 514
@@ -428,8 +545,11 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
428 } 545 }
429 ret = mlx4_status_to_errno(vhcr->status); 546 ret = mlx4_status_to_errno(vhcr->status);
430 } 547 }
548 if (ret &&
549 dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
550 ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
431 } else { 551 } else {
432 ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, 552 ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
433 MLX4_COMM_TIME + timeout); 553 MLX4_COMM_TIME + timeout);
434 if (!ret) { 554 if (!ret) {
435 if (out_is_imm) { 555 if (out_is_imm) {
@@ -443,9 +563,14 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
443 } 563 }
444 } 564 }
445 ret = mlx4_status_to_errno(vhcr->status); 565 ret = mlx4_status_to_errno(vhcr->status);
446 } else 566 } else {
447 mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", 567 if (dev->persist->state &
448 op); 568 MLX4_DEVICE_STATE_INTERNAL_ERROR)
569 ret = mlx4_internal_err_ret_value(dev, op,
570 op_modifier);
571 else
572 mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
573 }
449 } 574 }
450 575
451 mutex_unlock(&priv->cmd.slave_cmd_mutex); 576 mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@ -464,12 +589,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
464 589
465 down(&priv->cmd.poll_sem); 590 down(&priv->cmd.poll_sem);
466 591
467 if (pci_channel_offline(dev->pdev)) { 592 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
468 /* 593 /*
469 * Device is going through error recovery 594 * Device is going through error recovery
470 * and cannot accept commands. 595 * and cannot accept commands.
471 */ 596 */
472 err = -EIO; 597 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
473 goto out; 598 goto out;
474 } 599 }
475 600
@@ -483,16 +608,21 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
483 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, 608 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
484 in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0); 609 in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
485 if (err) 610 if (err)
486 goto out; 611 goto out_reset;
487 612
488 end = msecs_to_jiffies(timeout) + jiffies; 613 end = msecs_to_jiffies(timeout) + jiffies;
489 while (cmd_pending(dev) && time_before(jiffies, end)) { 614 while (cmd_pending(dev) && time_before(jiffies, end)) {
490 if (pci_channel_offline(dev->pdev)) { 615 if (pci_channel_offline(dev->persist->pdev)) {
491 /* 616 /*
492 * Device is going through error recovery 617 * Device is going through error recovery
493 * and cannot accept commands. 618 * and cannot accept commands.
494 */ 619 */
495 err = -EIO; 620 err = -EIO;
621 goto out_reset;
622 }
623
624 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
625 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
496 goto out; 626 goto out;
497 } 627 }
498 628
@@ -502,8 +632,8 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
502 if (cmd_pending(dev)) { 632 if (cmd_pending(dev)) {
503 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", 633 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
504 op); 634 op);
505 err = -ETIMEDOUT; 635 err = -EIO;
506 goto out; 636 goto out_reset;
507 } 637 }
508 638
509 if (out_is_imm) 639 if (out_is_imm)
@@ -515,10 +645,17 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
515 stat = be32_to_cpu((__force __be32) 645 stat = be32_to_cpu((__force __be32)
516 __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24; 646 __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
517 err = mlx4_status_to_errno(stat); 647 err = mlx4_status_to_errno(stat);
518 if (err) 648 if (err) {
519 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", 649 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
520 op, stat); 650 op, stat);
651 if (mlx4_closing_cmd_fatal_error(op, stat))
652 goto out_reset;
653 goto out;
654 }
521 655
656out_reset:
657 if (err)
658 err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
522out: 659out:
523 up(&priv->cmd.poll_sem); 660 up(&priv->cmd.poll_sem);
524 return err; 661 return err;
@@ -565,17 +702,19 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
565 goto out; 702 goto out;
566 } 703 }
567 704
568 init_completion(&context->done); 705 reinit_completion(&context->done);
569 706
570 mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, 707 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
571 in_modifier, op_modifier, op, context->token, 1); 708 in_modifier, op_modifier, op, context->token, 1);
709 if (err)
710 goto out_reset;
572 711
573 if (!wait_for_completion_timeout(&context->done, 712 if (!wait_for_completion_timeout(&context->done,
574 msecs_to_jiffies(timeout))) { 713 msecs_to_jiffies(timeout))) {
575 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", 714 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
576 op); 715 op);
577 err = -EBUSY; 716 err = -EIO;
578 goto out; 717 goto out_reset;
579 } 718 }
580 719
581 err = context->result; 720 err = context->result;
@@ -592,12 +731,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
592 else 731 else
593 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", 732 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
594 op, context->fw_status); 733 op, context->fw_status);
734 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
735 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
736 else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
737 goto out_reset;
738
595 goto out; 739 goto out;
596 } 740 }
597 741
598 if (out_is_imm) 742 if (out_is_imm)
599 *out_param = context->out_param; 743 *out_param = context->out_param;
600 744
745out_reset:
746 if (err)
747 err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
601out: 748out:
602 spin_lock(&cmd->context_lock); 749 spin_lock(&cmd->context_lock);
603 context->next = cmd->free_head; 750 context->next = cmd->free_head;
@@ -612,10 +759,13 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
612 int out_is_imm, u32 in_modifier, u8 op_modifier, 759 int out_is_imm, u32 in_modifier, u8 op_modifier,
613 u16 op, unsigned long timeout, int native) 760 u16 op, unsigned long timeout, int native)
614{ 761{
615 if (pci_channel_offline(dev->pdev)) 762 if (pci_channel_offline(dev->persist->pdev))
616 return -EIO; 763 return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
617 764
618 if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) { 765 if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
766 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
767 return mlx4_internal_err_ret_value(dev, op,
768 op_modifier);
619 if (mlx4_priv(dev)->cmd.use_events) 769 if (mlx4_priv(dev)->cmd.use_events)
620 return mlx4_cmd_wait(dev, in_param, out_param, 770 return mlx4_cmd_wait(dev, in_param, out_param,
621 out_is_imm, in_modifier, 771 out_is_imm, in_modifier,
@@ -631,7 +781,7 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
631EXPORT_SYMBOL_GPL(__mlx4_cmd); 781EXPORT_SYMBOL_GPL(__mlx4_cmd);
632 782
633 783
634static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev) 784int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
635{ 785{
636 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL, 786 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
637 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 787 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
@@ -751,7 +901,9 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
751 index = be32_to_cpu(smp->attr_mod); 901 index = be32_to_cpu(smp->attr_mod);
752 if (port < 1 || port > dev->caps.num_ports) 902 if (port < 1 || port > dev->caps.num_ports)
753 return -EINVAL; 903 return -EINVAL;
754 table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL); 904 table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
905 sizeof(*table) * 32, GFP_KERNEL);
906
755 if (!table) 907 if (!table)
756 return -ENOMEM; 908 return -ENOMEM;
757 /* need to get the full pkey table because the paravirtualized 909 /* need to get the full pkey table because the paravirtualized
@@ -1071,7 +1223,7 @@ static struct mlx4_cmd_info cmd_info[] = {
1071 { 1223 {
1072 .opcode = MLX4_CMD_HW2SW_EQ, 1224 .opcode = MLX4_CMD_HW2SW_EQ,
1073 .has_inbox = false, 1225 .has_inbox = false,
1074 .has_outbox = true, 1226 .has_outbox = false,
1075 .out_is_imm = false, 1227 .out_is_imm = false,
1076 .encode_slave_id = true, 1228 .encode_slave_id = true,
1077 .verify = NULL, 1229 .verify = NULL,
@@ -1431,6 +1583,15 @@ static struct mlx4_cmd_info cmd_info[] = {
1431 .verify = NULL, 1583 .verify = NULL,
1432 .wrapper = mlx4_CMD_EPERM_wrapper 1584 .wrapper = mlx4_CMD_EPERM_wrapper
1433 }, 1585 },
1586 {
1587 .opcode = MLX4_CMD_VIRT_PORT_MAP,
1588 .has_inbox = false,
1589 .has_outbox = false,
1590 .out_is_imm = false,
1591 .encode_slave_id = false,
1592 .verify = NULL,
1593 .wrapper = mlx4_CMD_EPERM_wrapper
1594 },
1434}; 1595};
1435 1596
1436static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, 1597static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
@@ -1460,8 +1621,10 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1460 ALIGN(sizeof(struct mlx4_vhcr_cmd), 1621 ALIGN(sizeof(struct mlx4_vhcr_cmd),
1461 MLX4_ACCESS_MEM_ALIGN), 1); 1622 MLX4_ACCESS_MEM_ALIGN), 1);
1462 if (ret) { 1623 if (ret) {
1463 mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n", 1624 if (!(dev->persist->state &
1464 __func__, ret); 1625 MLX4_DEVICE_STATE_INTERNAL_ERROR))
1626 mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1627 __func__, ret);
1465 kfree(vhcr); 1628 kfree(vhcr);
1466 return ret; 1629 return ret;
1467 } 1630 }
@@ -1500,11 +1663,14 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1500 goto out_status; 1663 goto out_status;
1501 } 1664 }
1502 1665
1503 if (mlx4_ACCESS_MEM(dev, inbox->dma, slave, 1666 ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1504 vhcr->in_param, 1667 vhcr->in_param,
1505 MLX4_MAILBOX_SIZE, 1)) { 1668 MLX4_MAILBOX_SIZE, 1);
1506 mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n", 1669 if (ret) {
1507 __func__, cmd->opcode); 1670 if (!(dev->persist->state &
1671 MLX4_DEVICE_STATE_INTERNAL_ERROR))
1672 mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1673 __func__, cmd->opcode);
1508 vhcr_cmd->status = CMD_STAT_INTERNAL_ERR; 1674 vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1509 goto out_status; 1675 goto out_status;
1510 } 1676 }
@@ -1552,8 +1718,9 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1552 } 1718 }
1553 1719
1554 if (err) { 1720 if (err) {
1555 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n", 1721 if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
1556 vhcr->op, slave, vhcr->errno, err); 1722 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
1723 vhcr->op, slave, vhcr->errno, err);
1557 vhcr_cmd->status = mlx4_errno_to_status(err); 1724 vhcr_cmd->status = mlx4_errno_to_status(err);
1558 goto out_status; 1725 goto out_status;
1559 } 1726 }
@@ -1568,7 +1735,9 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1568 /* If we failed to write back the outbox after the 1735 /* If we failed to write back the outbox after the
1569 *command was successfully executed, we must fail this 1736 *command was successfully executed, we must fail this
1570 * slave, as it is now in undefined state */ 1737 * slave, as it is now in undefined state */
1571 mlx4_err(dev, "%s:Failed writing outbox\n", __func__); 1738 if (!(dev->persist->state &
1739 MLX4_DEVICE_STATE_INTERNAL_ERROR))
1740 mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
1572 goto out; 1741 goto out;
1573 } 1742 }
1574 } 1743 }
@@ -1847,8 +2016,11 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1847 break; 2016 break;
1848 case MLX4_COMM_CMD_VHCR_POST: 2017 case MLX4_COMM_CMD_VHCR_POST:
1849 if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) && 2018 if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
1850 (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) 2019 (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
2020 mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
2021 slave, cmd, slave_state[slave].last_cmd);
1851 goto reset_slave; 2022 goto reset_slave;
2023 }
1852 2024
1853 mutex_lock(&priv->cmd.slave_cmd_mutex); 2025 mutex_lock(&priv->cmd.slave_cmd_mutex);
1854 if (mlx4_master_process_vhcr(dev, slave, NULL)) { 2026 if (mlx4_master_process_vhcr(dev, slave, NULL)) {
@@ -1882,7 +2054,18 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1882 2054
1883reset_slave: 2055reset_slave:
1884 /* cleanup any slave resources */ 2056 /* cleanup any slave resources */
1885 mlx4_delete_all_resources_for_slave(dev, slave); 2057 if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
2058 mlx4_delete_all_resources_for_slave(dev, slave);
2059
2060 if (cmd != MLX4_COMM_CMD_RESET) {
2061 mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
2062 slave, cmd);
2063 /* Turn on internal error letting slave reset itself immeditaly,
2064 * otherwise it might take till timeout on command is passed
2065 */
2066 reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
2067 }
2068
1886 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); 2069 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
1887 if (!slave_state[slave].is_slave_going_down) 2070 if (!slave_state[slave].is_slave_going_down)
1888 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET; 2071 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
@@ -1958,17 +2141,28 @@ void mlx4_master_comm_channel(struct work_struct *work)
1958static int sync_toggles(struct mlx4_dev *dev) 2141static int sync_toggles(struct mlx4_dev *dev)
1959{ 2142{
1960 struct mlx4_priv *priv = mlx4_priv(dev); 2143 struct mlx4_priv *priv = mlx4_priv(dev);
1961 int wr_toggle; 2144 u32 wr_toggle;
1962 int rd_toggle; 2145 u32 rd_toggle;
1963 unsigned long end; 2146 unsigned long end;
1964 2147
1965 wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31; 2148 wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
1966 end = jiffies + msecs_to_jiffies(5000); 2149 if (wr_toggle == 0xffffffff)
2150 end = jiffies + msecs_to_jiffies(30000);
2151 else
2152 end = jiffies + msecs_to_jiffies(5000);
1967 2153
1968 while (time_before(jiffies, end)) { 2154 while (time_before(jiffies, end)) {
1969 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31; 2155 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
1970 if (rd_toggle == wr_toggle) { 2156 if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
1971 priv->cmd.comm_toggle = rd_toggle; 2157 /* PCI might be offline */
2158 msleep(100);
2159 wr_toggle = swab32(readl(&priv->mfunc.comm->
2160 slave_write));
2161 continue;
2162 }
2163
2164 if (rd_toggle >> 31 == wr_toggle >> 31) {
2165 priv->cmd.comm_toggle = rd_toggle >> 31;
1972 return 0; 2166 return 0;
1973 } 2167 }
1974 2168
@@ -1997,11 +2191,12 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
1997 2191
1998 if (mlx4_is_master(dev)) 2192 if (mlx4_is_master(dev))
1999 priv->mfunc.comm = 2193 priv->mfunc.comm =
2000 ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) + 2194 ioremap(pci_resource_start(dev->persist->pdev,
2195 priv->fw.comm_bar) +
2001 priv->fw.comm_base, MLX4_COMM_PAGESIZE); 2196 priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2002 else 2197 else
2003 priv->mfunc.comm = 2198 priv->mfunc.comm =
2004 ioremap(pci_resource_start(dev->pdev, 2) + 2199 ioremap(pci_resource_start(dev->persist->pdev, 2) +
2005 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); 2200 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2006 if (!priv->mfunc.comm) { 2201 if (!priv->mfunc.comm) {
2007 mlx4_err(dev, "Couldn't map communication vector\n"); 2202 mlx4_err(dev, "Couldn't map communication vector\n");
@@ -2073,13 +2268,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
2073 if (mlx4_init_resource_tracker(dev)) 2268 if (mlx4_init_resource_tracker(dev))
2074 goto err_thread; 2269 goto err_thread;
2075 2270
2076 err = mlx4_ARM_COMM_CHANNEL(dev);
2077 if (err) {
2078 mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
2079 err);
2080 goto err_resource;
2081 }
2082
2083 } else { 2271 } else {
2084 err = sync_toggles(dev); 2272 err = sync_toggles(dev);
2085 if (err) { 2273 if (err) {
@@ -2089,8 +2277,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
2089 } 2277 }
2090 return 0; 2278 return 0;
2091 2279
2092err_resource:
2093 mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL);
2094err_thread: 2280err_thread:
2095 flush_workqueue(priv->mfunc.master.comm_wq); 2281 flush_workqueue(priv->mfunc.master.comm_wq);
2096 destroy_workqueue(priv->mfunc.master.comm_wq); 2282 destroy_workqueue(priv->mfunc.master.comm_wq);
@@ -2107,9 +2293,9 @@ err_comm_admin:
2107err_comm: 2293err_comm:
2108 iounmap(priv->mfunc.comm); 2294 iounmap(priv->mfunc.comm);
2109err_vhcr: 2295err_vhcr:
2110 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, 2296 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2111 priv->mfunc.vhcr, 2297 priv->mfunc.vhcr,
2112 priv->mfunc.vhcr_dma); 2298 priv->mfunc.vhcr_dma);
2113 priv->mfunc.vhcr = NULL; 2299 priv->mfunc.vhcr = NULL;
2114 return -ENOMEM; 2300 return -ENOMEM;
2115} 2301}
@@ -2120,7 +2306,6 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
2120 int flags = 0; 2306 int flags = 0;
2121 2307
2122 if (!priv->cmd.initialized) { 2308 if (!priv->cmd.initialized) {
2123 mutex_init(&priv->cmd.hcr_mutex);
2124 mutex_init(&priv->cmd.slave_cmd_mutex); 2309 mutex_init(&priv->cmd.slave_cmd_mutex);
2125 sema_init(&priv->cmd.poll_sem, 1); 2310 sema_init(&priv->cmd.poll_sem, 1);
2126 priv->cmd.use_events = 0; 2311 priv->cmd.use_events = 0;
@@ -2130,8 +2315,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
2130 } 2315 }
2131 2316
2132 if (!mlx4_is_slave(dev) && !priv->cmd.hcr) { 2317 if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
2133 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + 2318 priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
2134 MLX4_HCR_BASE, MLX4_HCR_SIZE); 2319 0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
2135 if (!priv->cmd.hcr) { 2320 if (!priv->cmd.hcr) {
2136 mlx4_err(dev, "Couldn't map command register\n"); 2321 mlx4_err(dev, "Couldn't map command register\n");
2137 goto err; 2322 goto err;
@@ -2140,7 +2325,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
2140 } 2325 }
2141 2326
2142 if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) { 2327 if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
2143 priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, 2328 priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
2329 PAGE_SIZE,
2144 &priv->mfunc.vhcr_dma, 2330 &priv->mfunc.vhcr_dma,
2145 GFP_KERNEL); 2331 GFP_KERNEL);
2146 if (!priv->mfunc.vhcr) 2332 if (!priv->mfunc.vhcr)
@@ -2150,7 +2336,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
2150 } 2336 }
2151 2337
2152 if (!priv->cmd.pool) { 2338 if (!priv->cmd.pool) {
2153 priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, 2339 priv->cmd.pool = pci_pool_create("mlx4_cmd",
2340 dev->persist->pdev,
2154 MLX4_MAILBOX_SIZE, 2341 MLX4_MAILBOX_SIZE,
2155 MLX4_MAILBOX_SIZE, 0); 2342 MLX4_MAILBOX_SIZE, 0);
2156 if (!priv->cmd.pool) 2343 if (!priv->cmd.pool)
@@ -2166,6 +2353,27 @@ err:
2166 return -ENOMEM; 2353 return -ENOMEM;
2167} 2354}
2168 2355
2356void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
2357{
2358 struct mlx4_priv *priv = mlx4_priv(dev);
2359 int slave;
2360 u32 slave_read;
2361
2362 /* Report an internal error event to all
2363 * communication channels.
2364 */
2365 for (slave = 0; slave < dev->num_slaves; slave++) {
2366 slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
2367 slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
2368 __raw_writel((__force u32)cpu_to_be32(slave_read),
2369 &priv->mfunc.comm[slave].slave_read);
2370 /* Make sure that our comm channel write doesn't
2371 * get mixed in with writes from another CPU.
2372 */
2373 mmiowb();
2374 }
2375}
2376
2169void mlx4_multi_func_cleanup(struct mlx4_dev *dev) 2377void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2170{ 2378{
2171 struct mlx4_priv *priv = mlx4_priv(dev); 2379 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2181,6 +2389,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2181 kfree(priv->mfunc.master.slave_state); 2389 kfree(priv->mfunc.master.slave_state);
2182 kfree(priv->mfunc.master.vf_admin); 2390 kfree(priv->mfunc.master.vf_admin);
2183 kfree(priv->mfunc.master.vf_oper); 2391 kfree(priv->mfunc.master.vf_oper);
2392 dev->num_slaves = 0;
2184 } 2393 }
2185 2394
2186 iounmap(priv->mfunc.comm); 2395 iounmap(priv->mfunc.comm);
@@ -2202,7 +2411,7 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
2202 } 2411 }
2203 if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr && 2412 if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
2204 (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) { 2413 (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
2205 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, 2414 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2206 priv->mfunc.vhcr, priv->mfunc.vhcr_dma); 2415 priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2207 priv->mfunc.vhcr = NULL; 2416 priv->mfunc.vhcr = NULL;
2208 } 2417 }
@@ -2229,6 +2438,11 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
2229 for (i = 0; i < priv->cmd.max_cmds; ++i) { 2438 for (i = 0; i < priv->cmd.max_cmds; ++i) {
2230 priv->cmd.context[i].token = i; 2439 priv->cmd.context[i].token = i;
2231 priv->cmd.context[i].next = i + 1; 2440 priv->cmd.context[i].next = i + 1;
2441 /* To support fatal error flow, initialize all
2442 * cmd contexts to allow simulating completions
2443 * with complete() at any time.
2444 */
2445 init_completion(&priv->cmd.context[i].done);
2232 } 2446 }
2233 2447
2234 priv->cmd.context[priv->cmd.max_cmds - 1].next = -1; 2448 priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
@@ -2306,8 +2520,9 @@ u32 mlx4_comm_get_version(void)
2306 2520
2307static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf) 2521static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2308{ 2522{
2309 if ((vf < 0) || (vf >= dev->num_vfs)) { 2523 if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
2310 mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs); 2524 mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
2525 vf, dev->persist->num_vfs);
2311 return -EINVAL; 2526 return -EINVAL;
2312 } 2527 }
2313 2528
@@ -2316,7 +2531,7 @@ static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2316 2531
2317int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave) 2532int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2318{ 2533{
2319 if (slave < 1 || slave > dev->num_vfs) { 2534 if (slave < 1 || slave > dev->persist->num_vfs) {
2320 mlx4_err(dev, 2535 mlx4_err(dev,
2321 "Bad slave number:%d (number of activated slaves: %lu)\n", 2536 "Bad slave number:%d (number of activated slaves: %lu)\n",
2322 slave, dev->num_slaves); 2537 slave, dev->num_slaves);
@@ -2325,6 +2540,25 @@ int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2325 return slave - 1; 2540 return slave - 1;
2326} 2541}
2327 2542
2543void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
2544{
2545 struct mlx4_priv *priv = mlx4_priv(dev);
2546 struct mlx4_cmd_context *context;
2547 int i;
2548
2549 spin_lock(&priv->cmd.context_lock);
2550 if (priv->cmd.context) {
2551 for (i = 0; i < priv->cmd.max_cmds; ++i) {
2552 context = &priv->cmd.context[i];
2553 context->fw_status = CMD_STAT_INTERNAL_ERR;
2554 context->result =
2555 mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
2556 complete(&context->done);
2557 }
2558 }
2559 spin_unlock(&priv->cmd.context_lock);
2560}
2561
2328struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave) 2562struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2329{ 2563{
2330 struct mlx4_active_ports actv_ports; 2564 struct mlx4_active_ports actv_ports;
@@ -2388,7 +2622,7 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2388 if (port <= 0 || port > dev->caps.num_ports) 2622 if (port <= 0 || port > dev->caps.num_ports)
2389 return slaves_pport; 2623 return slaves_pport;
2390 2624
2391 for (i = 0; i < dev->num_vfs + 1; i++) { 2625 for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2392 struct mlx4_active_ports actv_ports = 2626 struct mlx4_active_ports actv_ports =
2393 mlx4_get_active_ports(dev, i); 2627 mlx4_get_active_ports(dev, i);
2394 if (test_bit(port - 1, actv_ports.ports)) 2628 if (test_bit(port - 1, actv_ports.ports))
@@ -2408,7 +2642,7 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2408 2642
2409 bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX); 2643 bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2410 2644
2411 for (i = 0; i < dev->num_vfs + 1; i++) { 2645 for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2412 struct mlx4_active_ports actv_ports = 2646 struct mlx4_active_ports actv_ports =
2413 mlx4_get_active_ports(dev, i); 2647 mlx4_get_active_ports(dev, i);
2414 if (bitmap_equal(crit_ports->ports, actv_ports.ports, 2648 if (bitmap_equal(crit_ports->ports, actv_ports.ports,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 999014413b1a..90b5309cdb5c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/mlx4/device.h> 34#include <linux/mlx4/device.h>
35#include <linux/clocksource.h>
35 36
36#include "mlx4_en.h" 37#include "mlx4_en.h"
37 38
@@ -147,12 +148,9 @@ static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
147 struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, 148 struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
148 ptp_clock_info); 149 ptp_clock_info);
149 unsigned long flags; 150 unsigned long flags;
150 s64 now;
151 151
152 write_lock_irqsave(&mdev->clock_lock, flags); 152 write_lock_irqsave(&mdev->clock_lock, flags);
153 now = timecounter_read(&mdev->clock); 153 timecounter_adjtime(&mdev->clock, delta);
154 now += delta;
155 timecounter_init(&mdev->clock, &mdev->cycles, now);
156 write_unlock_irqrestore(&mdev->clock_lock, flags); 154 write_unlock_irqrestore(&mdev->clock_lock, flags);
157 155
158 return 0; 156 return 0;
@@ -243,7 +241,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
243{ 241{
244 struct mlx4_dev *dev = mdev->dev; 242 struct mlx4_dev *dev = mdev->dev;
245 unsigned long flags; 243 unsigned long flags;
246 u64 ns; 244 u64 ns, zero = 0;
247 245
248 rwlock_init(&mdev->clock_lock); 246 rwlock_init(&mdev->clock_lock);
249 247
@@ -268,7 +266,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
268 /* Calculate period in seconds to call the overflow watchdog - to make 266 /* Calculate period in seconds to call the overflow watchdog - to make
269 * sure counter is checked at least once every wrap around. 267 * sure counter is checked at least once every wrap around.
270 */ 268 */
271 ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask); 269 ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
272 do_div(ns, NSEC_PER_SEC / 2 / HZ); 270 do_div(ns, NSEC_PER_SEC / 2 / HZ);
273 mdev->overflow_period = ns; 271 mdev->overflow_period = ns;
274 272
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 82322b1c8411..22da4d0d0f05 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -70,10 +70,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
70 /* Allocate HW buffers on provided NUMA node. 70 /* Allocate HW buffers on provided NUMA node.
71 * dev->numa_node is used in mtt range allocation flow. 71 * dev->numa_node is used in mtt range allocation flow.
72 */ 72 */
73 set_dev_node(&mdev->dev->pdev->dev, node); 73 set_dev_node(&mdev->dev->persist->pdev->dev, node);
74 err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, 74 err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
75 cq->buf_size, 2 * PAGE_SIZE); 75 cq->buf_size, 2 * PAGE_SIZE);
76 set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); 76 set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
77 if (err) 77 if (err)
78 goto err_cq; 78 goto err_cq;
79 79
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 90e0f045a6bc..a7b58ba8492b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -92,7 +92,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
92 (u16) (mdev->dev->caps.fw_ver >> 32), 92 (u16) (mdev->dev->caps.fw_ver >> 32),
93 (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff), 93 (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
94 (u16) (mdev->dev->caps.fw_ver & 0xffff)); 94 (u16) (mdev->dev->caps.fw_ver & 0xffff));
95 strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 95 strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
96 sizeof(drvinfo->bus_info)); 96 sizeof(drvinfo->bus_info));
97 drvinfo->n_stats = 0; 97 drvinfo->n_stats = 0;
98 drvinfo->regdump_len = 0; 98 drvinfo->regdump_len = 0;
@@ -770,22 +770,20 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
770 return 0; 770 return 0;
771 } 771 }
772 772
773 proto_admin = cpu_to_be32(ptys_adv); 773 proto_admin = cmd->autoneg == AUTONEG_ENABLE ?
774 if (speed >= 0 && speed != priv->port_state.link_speed) 774 cpu_to_be32(ptys_adv) :
775 /* If speed was set then speed decides :-) */ 775 speed_set_ptys_admin(priv, speed,
776 proto_admin = speed_set_ptys_admin(priv, speed, 776 ptys_reg.eth_proto_cap);
777 ptys_reg.eth_proto_cap);
778 777
779 proto_admin &= ptys_reg.eth_proto_cap; 778 proto_admin &= ptys_reg.eth_proto_cap;
780
781 if (proto_admin == ptys_reg.eth_proto_admin)
782 return 0; /* Nothing to change */
783
784 if (!proto_admin) { 779 if (!proto_admin) {
785 en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n"); 780 en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
786 return -EINVAL; /* nothing to change due to bad input */ 781 return -EINVAL; /* nothing to change due to bad input */
787 } 782 }
788 783
784 if (proto_admin == ptys_reg.eth_proto_admin)
785 return 0; /* Nothing to change */
786
789 en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n", 787 en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
790 be32_to_cpu(proto_admin)); 788 be32_to_cpu(proto_admin));
791 789
@@ -798,9 +796,9 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
798 return ret; 796 return ret;
799 } 797 }
800 798
801 en_warn(priv, "Port link mode changed, restarting port...\n");
802 mutex_lock(&priv->mdev->state_lock); 799 mutex_lock(&priv->mdev->state_lock);
803 if (priv->port_up) { 800 if (priv->port_up) {
801 en_warn(priv, "Port link mode changed, restarting port...\n");
804 mlx4_en_stop_port(dev, 1); 802 mlx4_en_stop_port(dev, 1);
805 if (mlx4_en_start_port(dev)) 803 if (mlx4_en_start_port(dev))
806 en_err(priv, "Failed restarting port %d\n", priv->port); 804 en_err(priv, "Failed restarting port %d\n", priv->port);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 9f16f754137b..58d5a07d0ff4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -214,6 +214,8 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
214 iounmap(mdev->uar_map); 214 iounmap(mdev->uar_map);
215 mlx4_uar_free(dev, &mdev->priv_uar); 215 mlx4_uar_free(dev, &mdev->priv_uar);
216 mlx4_pd_free(dev, mdev->priv_pdn); 216 mlx4_pd_free(dev, mdev->priv_pdn);
217 if (mdev->nb.notifier_call)
218 unregister_netdevice_notifier(&mdev->nb);
217 kfree(mdev); 219 kfree(mdev);
218} 220}
219 221
@@ -241,8 +243,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
241 spin_lock_init(&mdev->uar_lock); 243 spin_lock_init(&mdev->uar_lock);
242 244
243 mdev->dev = dev; 245 mdev->dev = dev;
244 mdev->dma_device = &(dev->pdev->dev); 246 mdev->dma_device = &dev->persist->pdev->dev;
245 mdev->pdev = dev->pdev; 247 mdev->pdev = dev->persist->pdev;
246 mdev->device_up = false; 248 mdev->device_up = false;
247 249
248 mdev->LSO_support = !!(dev->caps.flags & (1 << 15)); 250 mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
@@ -298,6 +300,12 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
298 if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) 300 if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
299 mdev->pndev[i] = NULL; 301 mdev->pndev[i] = NULL;
300 } 302 }
303 /* register notifier */
304 mdev->nb.notifier_call = mlx4_en_netdev_event;
305 if (register_netdevice_notifier(&mdev->nb)) {
306 mdev->nb.notifier_call = NULL;
307 mlx4_err(mdev, "Failed to create notifier\n");
308 }
301 309
302 return mdev; 310 return mdev;
303 311
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index ac6a8f1eea6c..2a210c4efb89 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2062,6 +2062,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
2062 /* Detach the netdev so tasks would not attempt to access it */ 2062 /* Detach the netdev so tasks would not attempt to access it */
2063 mutex_lock(&mdev->state_lock); 2063 mutex_lock(&mdev->state_lock);
2064 mdev->pndev[priv->port] = NULL; 2064 mdev->pndev[priv->port] = NULL;
2065 mdev->upper[priv->port] = NULL;
2065 mutex_unlock(&mdev->state_lock); 2066 mutex_unlock(&mdev->state_lock);
2066 2067
2067 mlx4_en_free_resources(priv); 2068 mlx4_en_free_resources(priv);
@@ -2201,6 +2202,10 @@ static int mlx4_en_set_features(struct net_device *netdev,
2201 return ret; 2202 return ret;
2202 } 2203 }
2203 2204
2205 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
2206 en_info(priv, "Turn %s TX vlan strip offload\n",
2207 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
2208
2204 if (features & NETIF_F_LOOPBACK) 2209 if (features & NETIF_F_LOOPBACK)
2205 priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); 2210 priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
2206 else 2211 else
@@ -2441,6 +2446,180 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
2441#endif 2446#endif
2442}; 2447};
2443 2448
2449struct mlx4_en_bond {
2450 struct work_struct work;
2451 struct mlx4_en_priv *priv;
2452 int is_bonded;
2453 struct mlx4_port_map port_map;
2454};
2455
2456static void mlx4_en_bond_work(struct work_struct *work)
2457{
2458 struct mlx4_en_bond *bond = container_of(work,
2459 struct mlx4_en_bond,
2460 work);
2461 int err = 0;
2462 struct mlx4_dev *dev = bond->priv->mdev->dev;
2463
2464 if (bond->is_bonded) {
2465 if (!mlx4_is_bonded(dev)) {
2466 err = mlx4_bond(dev);
2467 if (err)
2468 en_err(bond->priv, "Fail to bond device\n");
2469 }
2470 if (!err) {
2471 err = mlx4_port_map_set(dev, &bond->port_map);
2472 if (err)
2473 en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n",
2474 bond->port_map.port1,
2475 bond->port_map.port2,
2476 err);
2477 }
2478 } else if (mlx4_is_bonded(dev)) {
2479 err = mlx4_unbond(dev);
2480 if (err)
2481 en_err(bond->priv, "Fail to unbond device\n");
2482 }
2483 dev_put(bond->priv->dev);
2484 kfree(bond);
2485}
2486
2487static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
2488 u8 v2p_p1, u8 v2p_p2)
2489{
2490 struct mlx4_en_bond *bond = NULL;
2491
2492 bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
2493 if (!bond)
2494 return -ENOMEM;
2495
2496 INIT_WORK(&bond->work, mlx4_en_bond_work);
2497 bond->priv = priv;
2498 bond->is_bonded = is_bonded;
2499 bond->port_map.port1 = v2p_p1;
2500 bond->port_map.port2 = v2p_p2;
2501 dev_hold(priv->dev);
2502 queue_work(priv->mdev->workqueue, &bond->work);
2503 return 0;
2504}
2505
2506int mlx4_en_netdev_event(struct notifier_block *this,
2507 unsigned long event, void *ptr)
2508{
2509 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2510 u8 port = 0;
2511 struct mlx4_en_dev *mdev;
2512 struct mlx4_dev *dev;
2513 int i, num_eth_ports = 0;
2514 bool do_bond = true;
2515 struct mlx4_en_priv *priv;
2516 u8 v2p_port1 = 0;
2517 u8 v2p_port2 = 0;
2518
2519 if (!net_eq(dev_net(ndev), &init_net))
2520 return NOTIFY_DONE;
2521
2522 mdev = container_of(this, struct mlx4_en_dev, nb);
2523 dev = mdev->dev;
2524
2525 /* Go into this mode only when two network devices set on two ports
2526 * of the same mlx4 device are slaves of the same bonding master
2527 */
2528 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
2529 ++num_eth_ports;
2530 if (!port && (mdev->pndev[i] == ndev))
2531 port = i;
2532 mdev->upper[i] = mdev->pndev[i] ?
2533 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
2534 /* condition not met: network device is a slave */
2535 if (!mdev->upper[i])
2536 do_bond = false;
2537 if (num_eth_ports < 2)
2538 continue;
2539 /* condition not met: same master */
2540 if (mdev->upper[i] != mdev->upper[i-1])
2541 do_bond = false;
2542 }
2543 /* condition not met: 2 salves */
2544 do_bond = (num_eth_ports == 2) ? do_bond : false;
2545
2546 /* handle only events that come with enough info */
2547 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
2548 return NOTIFY_DONE;
2549
2550 priv = netdev_priv(ndev);
2551 if (do_bond) {
2552 struct netdev_notifier_bonding_info *notifier_info = ptr;
2553 struct netdev_bonding_info *bonding_info =
2554 &notifier_info->bonding_info;
2555
2556 /* required mode 1, 2 or 4 */
2557 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
2558 (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
2559 (bonding_info->master.bond_mode != BOND_MODE_8023AD))
2560 do_bond = false;
2561
2562 /* require exactly 2 slaves */
2563 if (bonding_info->master.num_slaves != 2)
2564 do_bond = false;
2565
2566 /* calc v2p */
2567 if (do_bond) {
2568 if (bonding_info->master.bond_mode ==
2569 BOND_MODE_ACTIVEBACKUP) {
2570 /* in active-backup mode virtual ports are
2571 * mapped to the physical port of the active
2572 * slave */
2573 if (bonding_info->slave.state ==
2574 BOND_STATE_BACKUP) {
2575 if (port == 1) {
2576 v2p_port1 = 2;
2577 v2p_port2 = 2;
2578 } else {
2579 v2p_port1 = 1;
2580 v2p_port2 = 1;
2581 }
2582 } else { /* BOND_STATE_ACTIVE */
2583 if (port == 1) {
2584 v2p_port1 = 1;
2585 v2p_port2 = 1;
2586 } else {
2587 v2p_port1 = 2;
2588 v2p_port2 = 2;
2589 }
2590 }
2591 } else { /* Active-Active */
2592 /* in active-active mode a virtual port is
2593 * mapped to the native physical port if and only
2594 * if the physical port is up */
2595 __s8 link = bonding_info->slave.link;
2596
2597 if (port == 1)
2598 v2p_port2 = 2;
2599 else
2600 v2p_port1 = 1;
2601 if ((link == BOND_LINK_UP) ||
2602 (link == BOND_LINK_FAIL)) {
2603 if (port == 1)
2604 v2p_port1 = 1;
2605 else
2606 v2p_port2 = 2;
2607 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
2608 if (port == 1)
2609 v2p_port1 = 2;
2610 else
2611 v2p_port2 = 1;
2612 }
2613 }
2614 }
2615 }
2616
2617 mlx4_en_queue_bond_work(priv, do_bond,
2618 v2p_port1, v2p_port2);
2619
2620 return NOTIFY_DONE;
2621}
2622
2444int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 2623int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2445 struct mlx4_en_port_profile *prof) 2624 struct mlx4_en_port_profile *prof)
2446{ 2625{
@@ -2458,7 +2637,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2458 netif_set_real_num_tx_queues(dev, prof->tx_ring_num); 2637 netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
2459 netif_set_real_num_rx_queues(dev, prof->rx_ring_num); 2638 netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
2460 2639
2461 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); 2640 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
2462 dev->dev_port = port - 1; 2641 dev->dev_port = port - 1;
2463 2642
2464 /* 2643 /*
@@ -2623,6 +2802,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2623 } 2802 }
2624 2803
2625 mdev->pndev[port] = dev; 2804 mdev->pndev[port] = dev;
2805 mdev->upper[port] = NULL;
2626 2806
2627 netif_carrier_off(dev); 2807 netif_carrier_off(dev);
2628 mlx4_en_set_default_moderation(priv); 2808 mlx4_en_set_default_moderation(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index f1a5500ff72d..34f2fdf4fe5d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -50,10 +50,14 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
50 context->mtu_msgmax = 0xff; 50 context->mtu_msgmax = 0xff;
51 if (!is_tx && !rss) 51 if (!is_tx && !rss)
52 context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); 52 context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
53 if (is_tx) 53 if (is_tx) {
54 context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); 54 context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
55 else 55 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)
56 context->params2 |= MLX4_QP_BIT_FPP;
57
58 } else {
56 context->sq_size_stride = ilog2(TXBB_SIZE) - 4; 59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
60 }
57 context->usr_page = cpu_to_be32(mdev->priv_uar.index); 61 context->usr_page = cpu_to_be32(mdev->priv_uar.index);
58 context->local_qpn = cpu_to_be32(qpn); 62 context->local_qpn = cpu_to_be32(qpn);
59 context->pri_path.ackto = 1 & 0x07; 63 context->pri_path.ackto = 1 & 0x07;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index a0474eb94aa3..698d60de1255 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -162,6 +162,10 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
162 if (mlx4_alloc_pages(priv, &ring->page_alloc[i], 162 if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
163 frag_info, GFP_KERNEL | __GFP_COLD)) 163 frag_info, GFP_KERNEL | __GFP_COLD))
164 goto out; 164 goto out;
165
166 en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n",
167 i, ring->page_alloc[i].page_size,
168 atomic_read(&ring->page_alloc[i].page->_count));
165 } 169 }
166 return 0; 170 return 0;
167 171
@@ -387,10 +391,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
387 ring->rx_info, tmp); 391 ring->rx_info, tmp);
388 392
389 /* Allocate HW buffers on provided NUMA node */ 393 /* Allocate HW buffers on provided NUMA node */
390 set_dev_node(&mdev->dev->pdev->dev, node); 394 set_dev_node(&mdev->dev->persist->pdev->dev, node);
391 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, 395 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
392 ring->buf_size, 2 * PAGE_SIZE); 396 ring->buf_size, 2 * PAGE_SIZE);
393 set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); 397 set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
394 if (err) 398 if (err)
395 goto err_info; 399 goto err_info;
396 400
@@ -1059,8 +1063,9 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
1059 (eff_mtu > buf_size + frag_sizes[i]) ? 1063 (eff_mtu > buf_size + frag_sizes[i]) ?
1060 frag_sizes[i] : eff_mtu - buf_size; 1064 frag_sizes[i] : eff_mtu - buf_size;
1061 priv->frag_info[i].frag_prefix_size = buf_size; 1065 priv->frag_info[i].frag_prefix_size = buf_size;
1062 priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i], 1066 priv->frag_info[i].frag_stride =
1063 SMP_CACHE_BYTES); 1067 ALIGN(priv->frag_info[i].frag_size,
1068 SMP_CACHE_BYTES);
1064 buf_size += priv->frag_info[i].frag_size; 1069 buf_size += priv->frag_info[i].frag_size;
1065 i++; 1070 i++;
1066 } 1071 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index e3357bf523df..55f9f5c5344e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -91,10 +91,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
91 ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); 91 ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
92 92
93 /* Allocate HW buffers on provided NUMA node */ 93 /* Allocate HW buffers on provided NUMA node */
94 set_dev_node(&mdev->dev->pdev->dev, node); 94 set_dev_node(&mdev->dev->persist->pdev->dev, node);
95 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, 95 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
96 2 * PAGE_SIZE); 96 2 * PAGE_SIZE);
97 set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node); 97 set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
98 if (err) { 98 if (err) {
99 en_err(priv, "Failed allocating hwq resources\n"); 99 en_err(priv, "Failed allocating hwq resources\n");
100 goto err_bounce; 100 goto err_bounce;
@@ -682,8 +682,8 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
682 if (dev->num_tc) 682 if (dev->num_tc)
683 return skb_tx_hash(dev, skb); 683 return skb_tx_hash(dev, skb);
684 684
685 if (vlan_tx_tag_present(skb)) 685 if (skb_vlan_tag_present(skb))
686 up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; 686 up = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
687 687
688 return fallback(dev, skb) % rings_p_up + up * rings_p_up; 688 return fallback(dev, skb) % rings_p_up + up * rings_p_up;
689} 689}
@@ -742,8 +742,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
742 goto tx_drop; 742 goto tx_drop;
743 } 743 }
744 744
745 if (vlan_tx_tag_present(skb)) 745 if (skb_vlan_tag_present(skb))
746 vlan_tag = vlan_tx_tag_get(skb); 746 vlan_tag = skb_vlan_tag_get(skb);
747 747
748 748
749 netdev_txq_bql_enqueue_prefetchw(ring->tx_queue); 749 netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -930,7 +930,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
930 real_size = (real_size / 16) & 0x3f; 930 real_size = (real_size / 16) & 0x3f;
931 931
932 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && 932 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce &&
933 !vlan_tx_tag_present(skb) && send_doorbell) { 933 !skb_vlan_tag_present(skb) && send_doorbell) {
934 tx_desc->ctrl.bf_qpn = ring->doorbell_qpn | 934 tx_desc->ctrl.bf_qpn = ring->doorbell_qpn |
935 cpu_to_be32(real_size); 935 cpu_to_be32(real_size);
936 936
@@ -952,7 +952,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
952 } else { 952 } else {
953 tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); 953 tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
954 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * 954 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
955 !!vlan_tx_tag_present(skb); 955 !!skb_vlan_tag_present(skb);
956 tx_desc->ctrl.fence_size = real_size; 956 tx_desc->ctrl.fence_size = real_size;
957 957
958 /* Ensure new descriptor hits memory 958 /* Ensure new descriptor hits memory
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 3d275fbaf0eb..264bc15c1ff2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -88,6 +88,8 @@ static u64 get_async_ev_mask(struct mlx4_dev *dev)
88 u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK; 88 u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
89 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) 89 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
90 async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT); 90 async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
91 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
92 async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);
91 93
92 return async_ev_mask; 94 return async_ev_mask;
93} 95}
@@ -237,7 +239,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
237 struct mlx4_eqe eqe; 239 struct mlx4_eqe eqe;
238 240
239 /*don't send if we don't have the that slave */ 241 /*don't send if we don't have the that slave */
240 if (dev->num_vfs < slave) 242 if (dev->persist->num_vfs < slave)
241 return 0; 243 return 0;
242 memset(&eqe, 0, sizeof eqe); 244 memset(&eqe, 0, sizeof eqe);
243 245
@@ -255,7 +257,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
255 struct mlx4_eqe eqe; 257 struct mlx4_eqe eqe;
256 258
257 /*don't send if we don't have the that slave */ 259 /*don't send if we don't have the that slave */
258 if (dev->num_vfs < slave) 260 if (dev->persist->num_vfs < slave)
259 return 0; 261 return 0;
260 memset(&eqe, 0, sizeof eqe); 262 memset(&eqe, 0, sizeof eqe);
261 263
@@ -310,7 +312,7 @@ static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
310 struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev, 312 struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
311 port); 313 port);
312 314
313 for (i = 0; i < dev->num_vfs + 1; i++) 315 for (i = 0; i < dev->persist->num_vfs + 1; i++)
314 if (test_bit(i, slaves_pport.slaves)) 316 if (test_bit(i, slaves_pport.slaves))
315 set_and_calc_slave_port_state(dev, i, port, 317 set_and_calc_slave_port_state(dev, i, port,
316 event, &gen_event); 318 event, &gen_event);
@@ -429,8 +431,14 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
429 if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { 431 if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
430 mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n", 432 mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
431 i); 433 i);
432 434 /* In case of 'Reset flow' FLR can be generated for
433 mlx4_delete_all_resources_for_slave(dev, i); 435 * a slave before mlx4_load_one is done.
436 * make sure interface is up before trying to delete
437 * slave resources which weren't allocated yet.
438 */
439 if (dev->persist->interface_state &
440 MLX4_INTERFACE_STATE_UP)
441 mlx4_delete_all_resources_for_slave(dev, i);
434 /*return the slave to running mode*/ 442 /*return the slave to running mode*/
435 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); 443 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
436 slave_state[i].last_cmd = MLX4_COMM_CMD_RESET; 444 slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
@@ -560,7 +568,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
560 mlx4_priv(dev)->sense.do_sense_port[port] = 1; 568 mlx4_priv(dev)->sense.do_sense_port[port] = 1;
561 if (!mlx4_is_master(dev)) 569 if (!mlx4_is_master(dev))
562 break; 570 break;
563 for (i = 0; i < dev->num_vfs + 1; i++) { 571 for (i = 0; i < dev->persist->num_vfs + 1;
572 i++) {
564 if (!test_bit(i, slaves_port.slaves)) 573 if (!test_bit(i, slaves_port.slaves))
565 continue; 574 continue;
566 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { 575 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
@@ -596,7 +605,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
596 if (!mlx4_is_master(dev)) 605 if (!mlx4_is_master(dev))
597 break; 606 break;
598 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 607 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
599 for (i = 0; i < dev->num_vfs + 1; i++) { 608 for (i = 0;
609 i < dev->persist->num_vfs + 1;
610 i++) {
600 if (!test_bit(i, slaves_port.slaves)) 611 if (!test_bit(i, slaves_port.slaves))
601 continue; 612 continue;
602 if (i == mlx4_master_func_num(dev)) 613 if (i == mlx4_master_func_num(dev))
@@ -727,6 +738,26 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
727 (unsigned long) eqe); 738 (unsigned long) eqe);
728 break; 739 break;
729 740
741 case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
742 switch (eqe->subtype) {
743 case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
744 mlx4_warn(dev, "Bad cable detected on port %u\n",
745 eqe->event.bad_cable.port);
746 break;
747 case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
748 mlx4_warn(dev, "Unsupported cable detected\n");
749 break;
750 default:
751 mlx4_dbg(dev,
752 "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
753 eqe->type, eqe->subtype, eq->eqn,
754 eq->cons_index, eqe->owner, eq->nent,
755 !!(eqe->owner & 0x80) ^
756 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
757 break;
758 }
759 break;
760
730 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: 761 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
731 case MLX4_EVENT_TYPE_ECC_DETECT: 762 case MLX4_EVENT_TYPE_ECC_DETECT:
732 default: 763 default:
@@ -837,12 +868,10 @@ static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
837 MLX4_CMD_WRAPPED); 868 MLX4_CMD_WRAPPED);
838} 869}
839 870
840static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 871static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num)
841 int eq_num)
842{ 872{
843 return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 873 return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
844 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A, 874 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
845 MLX4_CMD_WRAPPED);
846} 875}
847 876
848static int mlx4_num_eq_uar(struct mlx4_dev *dev) 877static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -865,7 +894,7 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
865 894
866 if (!priv->eq_table.uar_map[index]) { 895 if (!priv->eq_table.uar_map[index]) {
867 priv->eq_table.uar_map[index] = 896 priv->eq_table.uar_map[index] =
868 ioremap(pci_resource_start(dev->pdev, 2) + 897 ioremap(pci_resource_start(dev->persist->pdev, 2) +
869 ((eq->eqn / 4) << PAGE_SHIFT), 898 ((eq->eqn / 4) << PAGE_SHIFT),
870 PAGE_SIZE); 899 PAGE_SIZE);
871 if (!priv->eq_table.uar_map[index]) { 900 if (!priv->eq_table.uar_map[index]) {
@@ -928,8 +957,10 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
928 eq_context = mailbox->buf; 957 eq_context = mailbox->buf;
929 958
930 for (i = 0; i < npages; ++i) { 959 for (i = 0; i < npages; ++i) {
931 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, 960 eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
932 PAGE_SIZE, &t, GFP_KERNEL); 961 pdev->dev,
962 PAGE_SIZE, &t,
963 GFP_KERNEL);
933 if (!eq->page_list[i].buf) 964 if (!eq->page_list[i].buf)
934 goto err_out_free_pages; 965 goto err_out_free_pages;
935 966
@@ -995,7 +1026,7 @@ err_out_free_eq:
995err_out_free_pages: 1026err_out_free_pages:
996 for (i = 0; i < npages; ++i) 1027 for (i = 0; i < npages; ++i)
997 if (eq->page_list[i].buf) 1028 if (eq->page_list[i].buf)
998 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 1029 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
999 eq->page_list[i].buf, 1030 eq->page_list[i].buf,
1000 eq->page_list[i].map); 1031 eq->page_list[i].map);
1001 1032
@@ -1013,7 +1044,6 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
1013 struct mlx4_eq *eq) 1044 struct mlx4_eq *eq)
1014{ 1045{
1015 struct mlx4_priv *priv = mlx4_priv(dev); 1046 struct mlx4_priv *priv = mlx4_priv(dev);
1016 struct mlx4_cmd_mailbox *mailbox;
1017 int err; 1047 int err;
1018 int i; 1048 int i;
1019 /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with 1049 /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
@@ -1021,36 +1051,21 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
1021 */ 1051 */
1022 int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; 1052 int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;
1023 1053
1024 mailbox = mlx4_alloc_cmd_mailbox(dev); 1054 err = mlx4_HW2SW_EQ(dev, eq->eqn);
1025 if (IS_ERR(mailbox))
1026 return;
1027
1028 err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
1029 if (err) 1055 if (err)
1030 mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err); 1056 mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
1031 1057
1032 if (0) {
1033 mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
1034 for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
1035 if (i % 4 == 0)
1036 pr_cont("[%02x] ", i * 4);
1037 pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
1038 if ((i + 1) % 4 == 0)
1039 pr_cont("\n");
1040 }
1041 }
1042 synchronize_irq(eq->irq); 1058 synchronize_irq(eq->irq);
1043 tasklet_disable(&eq->tasklet_ctx.task); 1059 tasklet_disable(&eq->tasklet_ctx.task);
1044 1060
1045 mlx4_mtt_cleanup(dev, &eq->mtt); 1061 mlx4_mtt_cleanup(dev, &eq->mtt);
1046 for (i = 0; i < npages; ++i) 1062 for (i = 0; i < npages; ++i)
1047 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 1063 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
1048 eq->page_list[i].buf, 1064 eq->page_list[i].buf,
1049 eq->page_list[i].map); 1065 eq->page_list[i].map);
1050 1066
1051 kfree(eq->page_list); 1067 kfree(eq->page_list);
1052 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); 1068 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
1053 mlx4_free_cmd_mailbox(dev, mailbox);
1054} 1069}
1055 1070
1056static void mlx4_free_irqs(struct mlx4_dev *dev) 1071static void mlx4_free_irqs(struct mlx4_dev *dev)
@@ -1060,7 +1075,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
1060 int i, vec; 1075 int i, vec;
1061 1076
1062 if (eq_table->have_irq) 1077 if (eq_table->have_irq)
1063 free_irq(dev->pdev->irq, dev); 1078 free_irq(dev->persist->pdev->irq, dev);
1064 1079
1065 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 1080 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
1066 if (eq_table->eq[i].have_irq) { 1081 if (eq_table->eq[i].have_irq) {
@@ -1089,7 +1104,8 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
1089{ 1104{
1090 struct mlx4_priv *priv = mlx4_priv(dev); 1105 struct mlx4_priv *priv = mlx4_priv(dev);
1091 1106
1092 priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) + 1107 priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev,
1108 priv->fw.clr_int_bar) +
1093 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); 1109 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
1094 if (!priv->clr_base) { 1110 if (!priv->clr_base) {
1095 mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n"); 1111 mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
@@ -1212,13 +1228,13 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
1212 i * MLX4_IRQNAME_SIZE, 1228 i * MLX4_IRQNAME_SIZE,
1213 MLX4_IRQNAME_SIZE, 1229 MLX4_IRQNAME_SIZE,
1214 "mlx4-comp-%d@pci:%s", i, 1230 "mlx4-comp-%d@pci:%s", i,
1215 pci_name(dev->pdev)); 1231 pci_name(dev->persist->pdev));
1216 } else { 1232 } else {
1217 snprintf(priv->eq_table.irq_names + 1233 snprintf(priv->eq_table.irq_names +
1218 i * MLX4_IRQNAME_SIZE, 1234 i * MLX4_IRQNAME_SIZE,
1219 MLX4_IRQNAME_SIZE, 1235 MLX4_IRQNAME_SIZE,
1220 "mlx4-async@pci:%s", 1236 "mlx4-async@pci:%s",
1221 pci_name(dev->pdev)); 1237 pci_name(dev->persist->pdev));
1222 } 1238 }
1223 1239
1224 eq_name = priv->eq_table.irq_names + 1240 eq_name = priv->eq_table.irq_names +
@@ -1235,8 +1251,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
1235 snprintf(priv->eq_table.irq_names, 1251 snprintf(priv->eq_table.irq_names,
1236 MLX4_IRQNAME_SIZE, 1252 MLX4_IRQNAME_SIZE,
1237 DRV_NAME "@pci:%s", 1253 DRV_NAME "@pci:%s",
1238 pci_name(dev->pdev)); 1254 pci_name(dev->persist->pdev));
1239 err = request_irq(dev->pdev->irq, mlx4_interrupt, 1255 err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
1240 IRQF_SHARED, priv->eq_table.irq_names, dev); 1256 IRQF_SHARED, priv->eq_table.irq_names, dev);
1241 if (err) 1257 if (err)
1242 goto err_out_async; 1258 goto err_out_async;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 982861d1df44..5a21e5dc94cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -84,13 +84,10 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
84 [ 1] = "UC transport", 84 [ 1] = "UC transport",
85 [ 2] = "UD transport", 85 [ 2] = "UD transport",
86 [ 3] = "XRC transport", 86 [ 3] = "XRC transport",
87 [ 4] = "reliable multicast",
88 [ 5] = "FCoIB support",
89 [ 6] = "SRQ support", 87 [ 6] = "SRQ support",
90 [ 7] = "IPoIB checksum offload", 88 [ 7] = "IPoIB checksum offload",
91 [ 8] = "P_Key violation counter", 89 [ 8] = "P_Key violation counter",
92 [ 9] = "Q_Key violation counter", 90 [ 9] = "Q_Key violation counter",
93 [10] = "VMM",
94 [12] = "Dual Port Different Protocol (DPDP) support", 91 [12] = "Dual Port Different Protocol (DPDP) support",
95 [15] = "Big LSO headers", 92 [15] = "Big LSO headers",
96 [16] = "MW support", 93 [16] = "MW support",
@@ -99,12 +96,11 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
99 [19] = "Raw multicast support", 96 [19] = "Raw multicast support",
100 [20] = "Address vector port checking support", 97 [20] = "Address vector port checking support",
101 [21] = "UD multicast support", 98 [21] = "UD multicast support",
102 [24] = "Demand paging support",
103 [25] = "Router support",
104 [30] = "IBoE support", 99 [30] = "IBoE support",
105 [32] = "Unicast loopback support", 100 [32] = "Unicast loopback support",
106 [34] = "FCS header control", 101 [34] = "FCS header control",
107 [38] = "Wake On LAN support", 102 [37] = "Wake On LAN (port1) support",
103 [38] = "Wake On LAN (port2) support",
108 [40] = "UDP RSS support", 104 [40] = "UDP RSS support",
109 [41] = "Unicast VEP steering support", 105 [41] = "Unicast VEP steering support",
110 [42] = "Multicast VEP steering support", 106 [42] = "Multicast VEP steering support",
@@ -145,7 +141,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
145 [16] = "CONFIG DEV support", 141 [16] = "CONFIG DEV support",
146 [17] = "Asymmetric EQs support", 142 [17] = "Asymmetric EQs support",
147 [18] = "More than 80 VFs support", 143 [18] = "More than 80 VFs support",
148 [19] = "Performance optimized for limited rule configuration flow steering support" 144 [19] = "Performance optimized for limited rule configuration flow steering support",
145 [20] = "Recoverable error events support",
146 [21] = "Port Remap support"
149 }; 147 };
150 int i; 148 int i;
151 149
@@ -259,6 +257,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
259#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28 257#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
260#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c 258#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
261#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30 259#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
260#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET 0x48
262 261
263#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50 262#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
264#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54 263#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
@@ -273,6 +272,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
273#define QUERY_FUNC_CAP_FLAG_RDMA 0x40 272#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
274#define QUERY_FUNC_CAP_FLAG_ETH 0x80 273#define QUERY_FUNC_CAP_FLAG_ETH 0x80
275#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10 274#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
275#define QUERY_FUNC_CAP_FLAG_RESD_LKEY 0x08
276#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04 276#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04
277 277
278#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31) 278#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31)
@@ -344,9 +344,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
344 } else if (vhcr->op_modifier == 0) { 344 } else if (vhcr->op_modifier == 0) {
345 struct mlx4_active_ports actv_ports = 345 struct mlx4_active_ports actv_ports =
346 mlx4_get_active_ports(dev, slave); 346 mlx4_get_active_ports(dev, slave);
347 /* enable rdma and ethernet interfaces, and new quota locations */ 347 /* enable rdma and ethernet interfaces, new quota locations,
348 * and reserved lkey
349 */
348 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | 350 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
349 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX); 351 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
352 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
350 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 353 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
351 354
352 field = min( 355 field = min(
@@ -411,6 +414,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
411 size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG | 414 size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
412 QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG; 415 QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
413 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); 416 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
417
418 size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
419 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
414 } else 420 } else
415 err = -EINVAL; 421 err = -EINVAL;
416 422
@@ -503,6 +509,13 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
503 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 509 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
504 func_cap->reserved_eq = size & 0xFFFFFF; 510 func_cap->reserved_eq = size & 0xFFFFFF;
505 511
512 if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
513 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
514 func_cap->reserved_lkey = size;
515 } else {
516 func_cap->reserved_lkey = 0;
517 }
518
506 func_cap->extra_flags = 0; 519 func_cap->extra_flags = 0;
507 520
508 /* Mailbox data from 0x6c and onward should only be treated if 521 /* Mailbox data from 0x6c and onward should only be treated if
@@ -851,6 +864,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
851 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 864 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
852 MLX4_GET(dev_cap->bmme_flags, outbox, 865 MLX4_GET(dev_cap->bmme_flags, outbox,
853 QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 866 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
867 if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
868 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
854 MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); 869 MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
855 if (field & 0x20) 870 if (field & 0x20)
856 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV; 871 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
@@ -859,6 +874,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
859 MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); 874 MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
860 if (field32 & (1 << 0)) 875 if (field32 & (1 << 0))
861 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; 876 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
877 if (field32 & (1 << 7))
878 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
862 MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); 879 MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
863 if (field & 1<<6) 880 if (field & 1<<6)
864 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; 881 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
@@ -1106,9 +1123,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
1106 field &= 0x7f; 1123 field &= 0x7f;
1107 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); 1124 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
1108 1125
1109 /* For guests, disable mw type 2 */ 1126 /* For guests, disable mw type 2 and port remap*/
1110 MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 1127 MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
1111 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; 1128 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
1129 bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
1112 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 1130 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
1113 1131
1114 /* turn off device-managed steering capability if not enabled */ 1132 /* turn off device-managed steering capability if not enabled */
@@ -1562,6 +1580,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1562#define INIT_HCA_VXLAN_OFFSET 0x0c 1580#define INIT_HCA_VXLAN_OFFSET 0x0c
1563#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e 1581#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
1564#define INIT_HCA_FLAGS_OFFSET 0x014 1582#define INIT_HCA_FLAGS_OFFSET 0x014
1583#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
1565#define INIT_HCA_QPC_OFFSET 0x020 1584#define INIT_HCA_QPC_OFFSET 0x020
1566#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) 1585#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
1567#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) 1586#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
@@ -1668,6 +1687,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1668 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1687 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1669 } 1688 }
1670 1689
1690 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
1691 *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
1692
1671 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 1693 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1672 1694
1673 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); 1695 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
@@ -1752,8 +1774,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1752 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET); 1774 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
1753 } 1775 }
1754 1776
1755 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000, 1777 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
1756 MLX4_CMD_NATIVE); 1778 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1757 1779
1758 if (err) 1780 if (err)
1759 mlx4_err(dev, "INIT_HCA returns %d\n", err); 1781 mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -1879,6 +1901,36 @@ out:
1879 return err; 1901 return err;
1880} 1902}
1881 1903
1904static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
1905{
1906 struct mlx4_cmd_mailbox *mailbox;
1907 __be32 *outbox;
1908 int err;
1909
1910 mailbox = mlx4_alloc_cmd_mailbox(dev);
1911 if (IS_ERR(mailbox)) {
1912 mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
1913 return PTR_ERR(mailbox);
1914 }
1915 outbox = mailbox->buf;
1916
1917 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1918 MLX4_CMD_QUERY_HCA,
1919 MLX4_CMD_TIME_CLASS_B,
1920 !mlx4_is_slave(dev));
1921 if (err) {
1922 mlx4_warn(dev, "hca_core_clock update failed\n");
1923 goto out;
1924 }
1925
1926 MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
1927
1928out:
1929 mlx4_free_cmd_mailbox(dev, mailbox);
1930
1931 return err;
1932}
1933
1882/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0 1934/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
1883 * and real QP0 are active, so that the paravirtualized QP0 is ready 1935 * and real QP0 are active, so that the paravirtualized QP0 is ready
1884 * to operate */ 1936 * to operate */
@@ -1983,6 +2035,9 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
1983 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2035 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1984 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2036 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
1985 2037
2038 if (!err)
2039 mlx4_hca_core_clock_update(dev);
2040
1986 return err; 2041 return err;
1987} 2042}
1988EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); 2043EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
@@ -2007,7 +2062,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
2007 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { 2062 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
2008 if (priv->mfunc.master.init_port_ref[port] == 1) { 2063 if (priv->mfunc.master.init_port_ref[port] == 1) {
2009 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2064 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2010 1000, MLX4_CMD_NATIVE); 2065 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2011 if (err) 2066 if (err)
2012 return err; 2067 return err;
2013 } 2068 }
@@ -2018,7 +2073,7 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
2018 if (!priv->mfunc.master.qp0_state[port].qp0_active && 2073 if (!priv->mfunc.master.qp0_state[port].qp0_active &&
2019 priv->mfunc.master.qp0_state[port].port_active) { 2074 priv->mfunc.master.qp0_state[port].port_active) {
2020 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2075 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2021 1000, MLX4_CMD_NATIVE); 2076 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2022 if (err) 2077 if (err)
2023 return err; 2078 return err;
2024 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); 2079 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
@@ -2033,15 +2088,15 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
2033 2088
2034int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) 2089int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
2035{ 2090{
2036 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000, 2091 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2037 MLX4_CMD_WRAPPED); 2092 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2038} 2093}
2039EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); 2094EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
2040 2095
2041int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) 2096int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
2042{ 2097{
2043 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000, 2098 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
2044 MLX4_CMD_NATIVE); 2099 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
2045} 2100}
2046 2101
2047struct mlx4_config_dev { 2102struct mlx4_config_dev {
@@ -2049,13 +2104,16 @@ struct mlx4_config_dev {
2049 __be32 rsvd1[3]; 2104 __be32 rsvd1[3];
2050 __be16 vxlan_udp_dport; 2105 __be16 vxlan_udp_dport;
2051 __be16 rsvd2; 2106 __be16 rsvd2;
2052 __be32 rsvd3[27]; 2107 __be32 rsvd3;
2053 __be16 rsvd4; 2108 __be32 roce_flags;
2054 u8 rsvd5; 2109 __be32 rsvd4[25];
2110 __be16 rsvd5;
2111 u8 rsvd6;
2055 u8 rx_checksum_val; 2112 u8 rx_checksum_val;
2056}; 2113};
2057 2114
2058#define MLX4_VXLAN_UDP_DPORT (1 << 0) 2115#define MLX4_VXLAN_UDP_DPORT (1 << 0)
2116#define MLX4_DISABLE_RX_PORT BIT(18)
2059 2117
2060static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) 2118static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
2061{ 2119{
@@ -2111,7 +2169,7 @@ static const u8 config_dev_csum_flags[] = {
2111int mlx4_config_dev_retrieval(struct mlx4_dev *dev, 2169int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
2112 struct mlx4_config_dev_params *params) 2170 struct mlx4_config_dev_params *params)
2113{ 2171{
2114 struct mlx4_config_dev config_dev; 2172 struct mlx4_config_dev config_dev = {0};
2115 int err; 2173 int err;
2116 u8 csum_mask; 2174 u8 csum_mask;
2117 2175
@@ -2158,6 +2216,45 @@ int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
2158} 2216}
2159EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); 2217EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
2160 2218
2219#define CONFIG_DISABLE_RX_PORT BIT(15)
2220int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis)
2221{
2222 struct mlx4_config_dev config_dev;
2223
2224 memset(&config_dev, 0, sizeof(config_dev));
2225 config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT);
2226 if (dis)
2227 config_dev.roce_flags =
2228 cpu_to_be32(CONFIG_DISABLE_RX_PORT);
2229
2230 return mlx4_CONFIG_DEV_set(dev, &config_dev);
2231}
2232
2233int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2)
2234{
2235 struct mlx4_cmd_mailbox *mailbox;
2236 struct {
2237 __be32 v_port1;
2238 __be32 v_port2;
2239 } *v2p;
2240 int err;
2241
2242 mailbox = mlx4_alloc_cmd_mailbox(dev);
2243 if (IS_ERR(mailbox))
2244 return -ENOMEM;
2245
2246 v2p = mailbox->buf;
2247 v2p->v_port1 = cpu_to_be32(port1);
2248 v2p->v_port2 = cpu_to_be32(port2);
2249
2250 err = mlx4_cmd(dev, mailbox->dma, 0,
2251 MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP,
2252 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2253
2254 mlx4_free_cmd_mailbox(dev, mailbox);
2255 return err;
2256}
2257
2161 2258
2162int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) 2259int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
2163{ 2260{
@@ -2180,7 +2277,8 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
2180int mlx4_NOP(struct mlx4_dev *dev) 2277int mlx4_NOP(struct mlx4_dev *dev)
2181{ 2278{
2182 /* Input modifier of 0x1f means "finish as soon as possible." */ 2279 /* Input modifier of 0x1f means "finish as soon as possible." */
2183 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE); 2280 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
2281 MLX4_CMD_NATIVE);
2184} 2282}
2185 2283
2186int mlx4_get_phys_port_id(struct mlx4_dev *dev) 2284int mlx4_get_phys_port_id(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 62562b60fa87..f44f7f6017ed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -147,6 +147,7 @@ struct mlx4_func_cap {
147 u32 qp0_proxy_qpn; 147 u32 qp0_proxy_qpn;
148 u32 qp1_tunnel_qpn; 148 u32 qp1_tunnel_qpn;
149 u32 qp1_proxy_qpn; 149 u32 qp1_proxy_qpn;
150 u32 reserved_lkey;
150 u8 physical_port; 151 u8 physical_port;
151 u8 port_flags; 152 u8 port_flags;
152 u8 flags1; 153 u8 flags1;
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 97c9b1db1d27..2a9dd460a95f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -56,7 +56,7 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
56 int i; 56 int i;
57 57
58 if (chunk->nsg > 0) 58 if (chunk->nsg > 0)
59 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, 59 pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
60 PCI_DMA_BIDIRECTIONAL); 60 PCI_DMA_BIDIRECTIONAL);
61 61
62 for (i = 0; i < chunk->npages; ++i) 62 for (i = 0; i < chunk->npages; ++i)
@@ -69,7 +69,8 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
69 int i; 69 int i;
70 70
71 for (i = 0; i < chunk->npages; ++i) 71 for (i = 0; i < chunk->npages; ++i)
72 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, 72 dma_free_coherent(&dev->persist->pdev->dev,
73 chunk->mem[i].length,
73 lowmem_page_address(sg_page(&chunk->mem[i])), 74 lowmem_page_address(sg_page(&chunk->mem[i])),
74 sg_dma_address(&chunk->mem[i])); 75 sg_dma_address(&chunk->mem[i]));
75} 76}
@@ -173,7 +174,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
173 --cur_order; 174 --cur_order;
174 175
175 if (coherent) 176 if (coherent)
176 ret = mlx4_alloc_icm_coherent(&dev->pdev->dev, 177 ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
177 &chunk->mem[chunk->npages], 178 &chunk->mem[chunk->npages],
178 cur_order, gfp_mask); 179 cur_order, gfp_mask);
179 else 180 else
@@ -193,7 +194,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
193 if (coherent) 194 if (coherent)
194 ++chunk->nsg; 195 ++chunk->nsg;
195 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { 196 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
196 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, 197 chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
197 chunk->npages, 198 chunk->npages,
198 PCI_DMA_BIDIRECTIONAL); 199 PCI_DMA_BIDIRECTIONAL);
199 200
@@ -208,7 +209,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
208 } 209 }
209 210
210 if (!coherent && chunk) { 211 if (!coherent && chunk) {
211 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, 212 chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
212 chunk->npages, 213 chunk->npages,
213 PCI_DMA_BIDIRECTIONAL); 214 PCI_DMA_BIDIRECTIONAL);
214 215
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 116895ac8b35..6fce58718837 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -33,11 +33,13 @@
33 33
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/export.h> 35#include <linux/export.h>
36#include <linux/errno.h>
36 37
37#include "mlx4.h" 38#include "mlx4.h"
38 39
39struct mlx4_device_context { 40struct mlx4_device_context {
40 struct list_head list; 41 struct list_head list;
42 struct list_head bond_list;
41 struct mlx4_interface *intf; 43 struct mlx4_interface *intf;
42 void *context; 44 void *context;
43}; 45};
@@ -115,6 +117,58 @@ void mlx4_unregister_interface(struct mlx4_interface *intf)
115} 117}
116EXPORT_SYMBOL_GPL(mlx4_unregister_interface); 118EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
117 119
120int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
121{
122 struct mlx4_priv *priv = mlx4_priv(dev);
123 struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
124 unsigned long flags;
125 int ret;
126 LIST_HEAD(bond_list);
127
128 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
129 return -ENOTSUPP;
130
131 ret = mlx4_disable_rx_port_check(dev, enable);
132 if (ret) {
133 mlx4_err(dev, "Fail to %s rx port check\n",
134 enable ? "enable" : "disable");
135 return ret;
136 }
137 if (enable) {
138 dev->flags |= MLX4_FLAG_BONDED;
139 } else {
140 ret = mlx4_virt2phy_port_map(dev, 1, 2);
141 if (ret) {
142 mlx4_err(dev, "Fail to reset port map\n");
143 return ret;
144 }
145 dev->flags &= ~MLX4_FLAG_BONDED;
146 }
147
148 spin_lock_irqsave(&priv->ctx_lock, flags);
149 list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
150 if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
151 list_add_tail(&dev_ctx->bond_list, &bond_list);
152 list_del(&dev_ctx->list);
153 }
154 }
155 spin_unlock_irqrestore(&priv->ctx_lock, flags);
156
157 list_for_each_entry(dev_ctx, &bond_list, bond_list) {
158 dev_ctx->intf->remove(dev, dev_ctx->context);
159 dev_ctx->context = dev_ctx->intf->add(dev);
160
161 spin_lock_irqsave(&priv->ctx_lock, flags);
162 list_add_tail(&dev_ctx->list, &priv->ctx_list);
163 spin_unlock_irqrestore(&priv->ctx_lock, flags);
164
165 mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n",
166 dev_ctx->intf->protocol, enable ?
167 "enabled" : "disabled");
168 }
169 return 0;
170}
171
118void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, 172void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
119 unsigned long param) 173 unsigned long param)
120{ 174{
@@ -138,13 +192,13 @@ int mlx4_register_device(struct mlx4_dev *dev)
138 192
139 mutex_lock(&intf_mutex); 193 mutex_lock(&intf_mutex);
140 194
195 dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP;
141 list_add_tail(&priv->dev_list, &dev_list); 196 list_add_tail(&priv->dev_list, &dev_list);
142 list_for_each_entry(intf, &intf_list, list) 197 list_for_each_entry(intf, &intf_list, list)
143 mlx4_add_device(intf, priv); 198 mlx4_add_device(intf, priv);
144 199
145 mutex_unlock(&intf_mutex); 200 mutex_unlock(&intf_mutex);
146 if (!mlx4_is_slave(dev)) 201 mlx4_start_catas_poll(dev);
147 mlx4_start_catas_poll(dev);
148 202
149 return 0; 203 return 0;
150} 204}
@@ -154,14 +208,14 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
154 struct mlx4_priv *priv = mlx4_priv(dev); 208 struct mlx4_priv *priv = mlx4_priv(dev);
155 struct mlx4_interface *intf; 209 struct mlx4_interface *intf;
156 210
157 if (!mlx4_is_slave(dev)) 211 mlx4_stop_catas_poll(dev);
158 mlx4_stop_catas_poll(dev);
159 mutex_lock(&intf_mutex); 212 mutex_lock(&intf_mutex);
160 213
161 list_for_each_entry(intf, &intf_list, list) 214 list_for_each_entry(intf, &intf_list, list)
162 mlx4_remove_device(intf, priv); 215 mlx4_remove_device(intf, priv);
163 216
164 list_del(&priv->dev_list); 217 list_del(&priv->dev_list);
218 dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP;
165 219
166 mutex_unlock(&intf_mutex); 220 mutex_unlock(&intf_mutex);
167} 221}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 6e08352ec994..7e487223489a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -108,6 +108,8 @@ MODULE_PARM_DESC(enable_64b_cqe_eqe,
108 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ 108 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
109 MLX4_FUNC_CAP_DMFS_A0_STATIC) 109 MLX4_FUNC_CAP_DMFS_A0_STATIC)
110 110
111#define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV)
112
111static char mlx4_version[] = 113static char mlx4_version[] =
112 DRV_NAME ": Mellanox ConnectX core driver v" 114 DRV_NAME ": Mellanox ConnectX core driver v"
113 DRV_VERSION " (" DRV_RELDATE ")\n"; 115 DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -249,7 +251,8 @@ static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
249 if (mlx4_is_master(dev)) 251 if (mlx4_is_master(dev))
250 dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; 252 dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
251 } else { 253 } else {
252 mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n"); 254 if (cache_line_size() != 32 && cache_line_size() != 64)
255 mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
253 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 256 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
254 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 257 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
255 } 258 }
@@ -318,10 +321,11 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
318 return -ENODEV; 321 return -ENODEV;
319 } 322 }
320 323
321 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { 324 if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
322 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 325 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
323 dev_cap->uar_size, 326 dev_cap->uar_size,
324 (unsigned long long) pci_resource_len(dev->pdev, 2)); 327 (unsigned long long)
328 pci_resource_len(dev->persist->pdev, 2));
325 return -ENODEV; 329 return -ENODEV;
326 } 330 }
327 331
@@ -541,8 +545,10 @@ static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
541 *speed = PCI_SPEED_UNKNOWN; 545 *speed = PCI_SPEED_UNKNOWN;
542 *width = PCIE_LNK_WIDTH_UNKNOWN; 546 *width = PCIE_LNK_WIDTH_UNKNOWN;
543 547
544 err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1); 548 err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
545 err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2); 549 &lnkcap1);
550 err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
551 &lnkcap2);
546 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ 552 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
547 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) 553 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
548 *speed = PCIE_SPEED_8_0GT; 554 *speed = PCIE_SPEED_8_0GT;
@@ -587,7 +593,7 @@ static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
587 return; 593 return;
588 } 594 }
589 595
590 err = pcie_get_minimum_link(dev->pdev, &speed, &width); 596 err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
591 if (err || speed == PCI_SPEED_UNKNOWN || 597 if (err || speed == PCI_SPEED_UNKNOWN ||
592 width == PCIE_LNK_WIDTH_UNKNOWN) { 598 width == PCIE_LNK_WIDTH_UNKNOWN) {
593 mlx4_warn(dev, 599 mlx4_warn(dev,
@@ -792,6 +798,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
792 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; 798 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
793 dev->caps.num_eqs = func_cap.max_eq; 799 dev->caps.num_eqs = func_cap.max_eq;
794 dev->caps.reserved_eqs = func_cap.reserved_eq; 800 dev->caps.reserved_eqs = func_cap.reserved_eq;
801 dev->caps.reserved_lkey = func_cap.reserved_lkey;
795 dev->caps.num_pds = MLX4_NUM_PDS; 802 dev->caps.num_pds = MLX4_NUM_PDS;
796 dev->caps.num_mgms = 0; 803 dev->caps.num_mgms = 0;
797 dev->caps.num_amgms = 0; 804 dev->caps.num_amgms = 0;
@@ -837,10 +844,12 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
837 844
838 if (dev->caps.uar_page_size * (dev->caps.num_uars - 845 if (dev->caps.uar_page_size * (dev->caps.num_uars -
839 dev->caps.reserved_uars) > 846 dev->caps.reserved_uars) >
840 pci_resource_len(dev->pdev, 2)) { 847 pci_resource_len(dev->persist->pdev,
848 2)) {
841 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 849 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
842 dev->caps.uar_page_size * dev->caps.num_uars, 850 dev->caps.uar_page_size * dev->caps.num_uars,
843 (unsigned long long) pci_resource_len(dev->pdev, 2)); 851 (unsigned long long)
852 pci_resource_len(dev->persist->pdev, 2));
844 goto err_mem; 853 goto err_mem;
845 } 854 }
846 855
@@ -1152,6 +1161,91 @@ err_set_port:
1152 return err ? err : count; 1161 return err ? err : count;
1153} 1162}
1154 1163
1164int mlx4_bond(struct mlx4_dev *dev)
1165{
1166 int ret = 0;
1167 struct mlx4_priv *priv = mlx4_priv(dev);
1168
1169 mutex_lock(&priv->bond_mutex);
1170
1171 if (!mlx4_is_bonded(dev))
1172 ret = mlx4_do_bond(dev, true);
1173 else
1174 ret = 0;
1175
1176 mutex_unlock(&priv->bond_mutex);
1177 if (ret)
1178 mlx4_err(dev, "Failed to bond device: %d\n", ret);
1179 else
1180 mlx4_dbg(dev, "Device is bonded\n");
1181 return ret;
1182}
1183EXPORT_SYMBOL_GPL(mlx4_bond);
1184
1185int mlx4_unbond(struct mlx4_dev *dev)
1186{
1187 int ret = 0;
1188 struct mlx4_priv *priv = mlx4_priv(dev);
1189
1190 mutex_lock(&priv->bond_mutex);
1191
1192 if (mlx4_is_bonded(dev))
1193 ret = mlx4_do_bond(dev, false);
1194
1195 mutex_unlock(&priv->bond_mutex);
1196 if (ret)
1197 mlx4_err(dev, "Failed to unbond device: %d\n", ret);
1198 else
1199 mlx4_dbg(dev, "Device is unbonded\n");
1200 return ret;
1201}
1202EXPORT_SYMBOL_GPL(mlx4_unbond);
1203
1204
1205int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
1206{
1207 u8 port1 = v2p->port1;
1208 u8 port2 = v2p->port2;
1209 struct mlx4_priv *priv = mlx4_priv(dev);
1210 int err;
1211
1212 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
1213 return -ENOTSUPP;
1214
1215 mutex_lock(&priv->bond_mutex);
1216
1217 /* zero means keep current mapping for this port */
1218 if (port1 == 0)
1219 port1 = priv->v2p.port1;
1220 if (port2 == 0)
1221 port2 = priv->v2p.port2;
1222
1223 if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
1224 (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
1225 (port1 == 2 && port2 == 1)) {
1226 /* besides boundary checks cross mapping makes
1227 * no sense and therefore not allowed */
1228 err = -EINVAL;
1229 } else if ((port1 == priv->v2p.port1) &&
1230 (port2 == priv->v2p.port2)) {
1231 err = 0;
1232 } else {
1233 err = mlx4_virt2phy_port_map(dev, port1, port2);
1234 if (!err) {
1235 mlx4_dbg(dev, "port map changed: [%d][%d]\n",
1236 port1, port2);
1237 priv->v2p.port1 = port1;
1238 priv->v2p.port2 = port2;
1239 } else {
1240 mlx4_err(dev, "Failed to change port mape: %d\n", err);
1241 }
1242 }
1243
1244 mutex_unlock(&priv->bond_mutex);
1245 return err;
1246}
1247EXPORT_SYMBOL_GPL(mlx4_port_map_set);
1248
1155static int mlx4_load_fw(struct mlx4_dev *dev) 1249static int mlx4_load_fw(struct mlx4_dev *dev)
1156{ 1250{
1157 struct mlx4_priv *priv = mlx4_priv(dev); 1251 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1477,7 +1571,8 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
1477 struct mlx4_priv *priv = mlx4_priv(dev); 1571 struct mlx4_priv *priv = mlx4_priv(dev);
1478 1572
1479 mutex_lock(&priv->cmd.slave_cmd_mutex); 1573 mutex_lock(&priv->cmd.slave_cmd_mutex);
1480 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) 1574 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
1575 MLX4_COMM_TIME))
1481 mlx4_warn(dev, "Failed to close slave function\n"); 1576 mlx4_warn(dev, "Failed to close slave function\n");
1482 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1577 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1483} 1578}
@@ -1492,9 +1587,9 @@ static int map_bf_area(struct mlx4_dev *dev)
1492 if (!dev->caps.bf_reg_size) 1587 if (!dev->caps.bf_reg_size)
1493 return -ENXIO; 1588 return -ENXIO;
1494 1589
1495 bf_start = pci_resource_start(dev->pdev, 2) + 1590 bf_start = pci_resource_start(dev->persist->pdev, 2) +
1496 (dev->caps.num_uars << PAGE_SHIFT); 1591 (dev->caps.num_uars << PAGE_SHIFT);
1497 bf_len = pci_resource_len(dev->pdev, 2) - 1592 bf_len = pci_resource_len(dev->persist->pdev, 2) -
1498 (dev->caps.num_uars << PAGE_SHIFT); 1593 (dev->caps.num_uars << PAGE_SHIFT);
1499 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1594 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
1500 if (!priv->bf_mapping) 1595 if (!priv->bf_mapping)
@@ -1536,7 +1631,8 @@ static int map_internal_clock(struct mlx4_dev *dev)
1536 struct mlx4_priv *priv = mlx4_priv(dev); 1631 struct mlx4_priv *priv = mlx4_priv(dev);
1537 1632
1538 priv->clock_mapping = 1633 priv->clock_mapping =
1539 ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) + 1634 ioremap(pci_resource_start(dev->persist->pdev,
1635 priv->fw.clock_bar) +
1540 priv->fw.clock_offset, MLX4_CLOCK_SIZE); 1636 priv->fw.clock_offset, MLX4_CLOCK_SIZE);
1541 1637
1542 if (!priv->clock_mapping) 1638 if (!priv->clock_mapping)
@@ -1573,6 +1669,50 @@ static void mlx4_close_fw(struct mlx4_dev *dev)
1573 } 1669 }
1574} 1670}
1575 1671
1672static int mlx4_comm_check_offline(struct mlx4_dev *dev)
1673{
1674#define COMM_CHAN_OFFLINE_OFFSET 0x09
1675
1676 u32 comm_flags;
1677 u32 offline_bit;
1678 unsigned long end;
1679 struct mlx4_priv *priv = mlx4_priv(dev);
1680
1681 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
1682 while (time_before(jiffies, end)) {
1683 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
1684 MLX4_COMM_CHAN_FLAGS));
1685 offline_bit = (comm_flags &
1686 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
1687 if (!offline_bit)
1688 return 0;
1689 /* There are cases as part of AER/Reset flow that PF needs
1690 * around 100 msec to load. We therefore sleep for 100 msec
1691 * to allow other tasks to make use of that CPU during this
1692 * time interval.
1693 */
1694 msleep(100);
1695 }
1696 mlx4_err(dev, "Communication channel is offline.\n");
1697 return -EIO;
1698}
1699
1700static void mlx4_reset_vf_support(struct mlx4_dev *dev)
1701{
1702#define COMM_CHAN_RST_OFFSET 0x1e
1703
1704 struct mlx4_priv *priv = mlx4_priv(dev);
1705 u32 comm_rst;
1706 u32 comm_caps;
1707
1708 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm +
1709 MLX4_COMM_CHAN_CAPS));
1710 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));
1711
1712 if (comm_rst)
1713 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
1714}
1715
1576static int mlx4_init_slave(struct mlx4_dev *dev) 1716static int mlx4_init_slave(struct mlx4_dev *dev)
1577{ 1717{
1578 struct mlx4_priv *priv = mlx4_priv(dev); 1718 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1588,9 +1728,15 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1588 1728
1589 mutex_lock(&priv->cmd.slave_cmd_mutex); 1729 mutex_lock(&priv->cmd.slave_cmd_mutex);
1590 priv->cmd.max_cmds = 1; 1730 priv->cmd.max_cmds = 1;
1731 if (mlx4_comm_check_offline(dev)) {
1732 mlx4_err(dev, "PF is not responsive, skipping initialization\n");
1733 goto err_offline;
1734 }
1735
1736 mlx4_reset_vf_support(dev);
1591 mlx4_warn(dev, "Sending reset\n"); 1737 mlx4_warn(dev, "Sending reset\n");
1592 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 1738 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
1593 MLX4_COMM_TIME); 1739 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
1594 /* if we are in the middle of flr the slave will try 1740 /* if we are in the middle of flr the slave will try
1595 * NUM_OF_RESET_RETRIES times before leaving.*/ 1741 * NUM_OF_RESET_RETRIES times before leaving.*/
1596 if (ret_from_reset) { 1742 if (ret_from_reset) {
@@ -1615,22 +1761,24 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
1615 1761
1616 mlx4_warn(dev, "Sending vhcr0\n"); 1762 mlx4_warn(dev, "Sending vhcr0\n");
1617 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, 1763 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
1618 MLX4_COMM_TIME)) 1764 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1619 goto err; 1765 goto err;
1620 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, 1766 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
1621 MLX4_COMM_TIME)) 1767 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1622 goto err; 1768 goto err;
1623 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, 1769 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
1624 MLX4_COMM_TIME)) 1770 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1625 goto err; 1771 goto err;
1626 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME)) 1772 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
1773 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1627 goto err; 1774 goto err;
1628 1775
1629 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1776 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1630 return 0; 1777 return 0;
1631 1778
1632err: 1779err:
1633 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0); 1780 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
1781err_offline:
1634 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1782 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1635 return -EIO; 1783 return -EIO;
1636} 1784}
@@ -1705,7 +1853,8 @@ static void choose_steering_mode(struct mlx4_dev *dev,
1705 if (mlx4_log_num_mgm_entry_size <= 0 && 1853 if (mlx4_log_num_mgm_entry_size <= 0 &&
1706 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 1854 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
1707 (!mlx4_is_mfunc(dev) || 1855 (!mlx4_is_mfunc(dev) ||
1708 (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) && 1856 (dev_cap->fs_max_num_qp_per_entry >=
1857 (dev->persist->num_vfs + 1))) &&
1709 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 1858 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
1710 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 1859 MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
1711 dev->oper_log_mgm_entry_size = 1860 dev->oper_log_mgm_entry_size =
@@ -2287,7 +2436,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2287 for (i = 0; i < nreq; ++i) 2436 for (i = 0; i < nreq; ++i)
2288 entries[i].entry = i; 2437 entries[i].entry = i;
2289 2438
2290 nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq); 2439 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
2440 nreq);
2291 2441
2292 if (nreq < 0) { 2442 if (nreq < 0) {
2293 kfree(entries); 2443 kfree(entries);
@@ -2315,7 +2465,7 @@ no_msi:
2315 dev->caps.comp_pool = 0; 2465 dev->caps.comp_pool = 0;
2316 2466
2317 for (i = 0; i < 2; ++i) 2467 for (i = 0; i < 2; ++i)
2318 priv->eq_table.eq[i].irq = dev->pdev->irq; 2468 priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
2319} 2469}
2320 2470
2321static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 2471static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
@@ -2343,7 +2493,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2343 info->port_attr.show = show_port_type; 2493 info->port_attr.show = show_port_type;
2344 sysfs_attr_init(&info->port_attr.attr); 2494 sysfs_attr_init(&info->port_attr.attr);
2345 2495
2346 err = device_create_file(&dev->pdev->dev, &info->port_attr); 2496 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
2347 if (err) { 2497 if (err) {
2348 mlx4_err(dev, "Failed to create file for port %d\n", port); 2498 mlx4_err(dev, "Failed to create file for port %d\n", port);
2349 info->port = -1; 2499 info->port = -1;
@@ -2360,10 +2510,12 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2360 info->port_mtu_attr.show = show_port_ib_mtu; 2510 info->port_mtu_attr.show = show_port_ib_mtu;
2361 sysfs_attr_init(&info->port_mtu_attr.attr); 2511 sysfs_attr_init(&info->port_mtu_attr.attr);
2362 2512
2363 err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr); 2513 err = device_create_file(&dev->persist->pdev->dev,
2514 &info->port_mtu_attr);
2364 if (err) { 2515 if (err) {
2365 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 2516 mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
2366 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 2517 device_remove_file(&info->dev->persist->pdev->dev,
2518 &info->port_attr);
2367 info->port = -1; 2519 info->port = -1;
2368 } 2520 }
2369 2521
@@ -2375,8 +2527,9 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
2375 if (info->port < 0) 2527 if (info->port < 0)
2376 return; 2528 return;
2377 2529
2378 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 2530 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
2379 device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr); 2531 device_remove_file(&info->dev->persist->pdev->dev,
2532 &info->port_mtu_attr);
2380} 2533}
2381 2534
2382static int mlx4_init_steering(struct mlx4_dev *dev) 2535static int mlx4_init_steering(struct mlx4_dev *dev)
@@ -2443,10 +2596,11 @@ static int mlx4_get_ownership(struct mlx4_dev *dev)
2443 void __iomem *owner; 2596 void __iomem *owner;
2444 u32 ret; 2597 u32 ret;
2445 2598
2446 if (pci_channel_offline(dev->pdev)) 2599 if (pci_channel_offline(dev->persist->pdev))
2447 return -EIO; 2600 return -EIO;
2448 2601
2449 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, 2602 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
2603 MLX4_OWNER_BASE,
2450 MLX4_OWNER_SIZE); 2604 MLX4_OWNER_SIZE);
2451 if (!owner) { 2605 if (!owner) {
2452 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2606 mlx4_err(dev, "Failed to obtain ownership bit\n");
@@ -2462,10 +2616,11 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
2462{ 2616{
2463 void __iomem *owner; 2617 void __iomem *owner;
2464 2618
2465 if (pci_channel_offline(dev->pdev)) 2619 if (pci_channel_offline(dev->persist->pdev))
2466 return; 2620 return;
2467 2621
2468 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, 2622 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
2623 MLX4_OWNER_BASE,
2469 MLX4_OWNER_SIZE); 2624 MLX4_OWNER_SIZE);
2470 if (!owner) { 2625 if (!owner) {
2471 mlx4_err(dev, "Failed to obtain ownership bit\n"); 2626 mlx4_err(dev, "Failed to obtain ownership bit\n");
@@ -2480,11 +2635,19 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
2480 !!((flags) & MLX4_FLAG_MASTER)) 2635 !!((flags) & MLX4_FLAG_MASTER))
2481 2636
2482static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 2637static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
2483 u8 total_vfs, int existing_vfs) 2638 u8 total_vfs, int existing_vfs, int reset_flow)
2484{ 2639{
2485 u64 dev_flags = dev->flags; 2640 u64 dev_flags = dev->flags;
2486 int err = 0; 2641 int err = 0;
2487 2642
2643 if (reset_flow) {
2644 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
2645 GFP_KERNEL);
2646 if (!dev->dev_vfs)
2647 goto free_mem;
2648 return dev_flags;
2649 }
2650
2488 atomic_inc(&pf_loading); 2651 atomic_inc(&pf_loading);
2489 if (dev->flags & MLX4_FLAG_SRIOV) { 2652 if (dev->flags & MLX4_FLAG_SRIOV) {
2490 if (existing_vfs != total_vfs) { 2653 if (existing_vfs != total_vfs) {
@@ -2513,13 +2676,14 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
2513 dev_flags |= MLX4_FLAG_SRIOV | 2676 dev_flags |= MLX4_FLAG_SRIOV |
2514 MLX4_FLAG_MASTER; 2677 MLX4_FLAG_MASTER;
2515 dev_flags &= ~MLX4_FLAG_SLAVE; 2678 dev_flags &= ~MLX4_FLAG_SLAVE;
2516 dev->num_vfs = total_vfs; 2679 dev->persist->num_vfs = total_vfs;
2517 } 2680 }
2518 return dev_flags; 2681 return dev_flags;
2519 2682
2520disable_sriov: 2683disable_sriov:
2521 atomic_dec(&pf_loading); 2684 atomic_dec(&pf_loading);
2522 dev->num_vfs = 0; 2685free_mem:
2686 dev->persist->num_vfs = 0;
2523 kfree(dev->dev_vfs); 2687 kfree(dev->dev_vfs);
2524 return dev_flags & ~MLX4_FLAG_MASTER; 2688 return dev_flags & ~MLX4_FLAG_MASTER;
2525} 2689}
@@ -2543,7 +2707,8 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
2543} 2707}
2544 2708
2545static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 2709static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
2546 int total_vfs, int *nvfs, struct mlx4_priv *priv) 2710 int total_vfs, int *nvfs, struct mlx4_priv *priv,
2711 int reset_flow)
2547{ 2712{
2548 struct mlx4_dev *dev; 2713 struct mlx4_dev *dev;
2549 unsigned sum = 0; 2714 unsigned sum = 0;
@@ -2559,6 +2724,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
2559 spin_lock_init(&priv->ctx_lock); 2724 spin_lock_init(&priv->ctx_lock);
2560 2725
2561 mutex_init(&priv->port_mutex); 2726 mutex_init(&priv->port_mutex);
2727 mutex_init(&priv->bond_mutex);
2562 2728
2563 INIT_LIST_HEAD(&priv->pgdir_list); 2729 INIT_LIST_HEAD(&priv->pgdir_list);
2564 mutex_init(&priv->pgdir_mutex); 2730 mutex_init(&priv->pgdir_mutex);
@@ -2606,10 +2772,15 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
2606 existing_vfs = pci_num_vf(pdev); 2772 existing_vfs = pci_num_vf(pdev);
2607 if (existing_vfs) 2773 if (existing_vfs)
2608 dev->flags |= MLX4_FLAG_SRIOV; 2774 dev->flags |= MLX4_FLAG_SRIOV;
2609 dev->num_vfs = total_vfs; 2775 dev->persist->num_vfs = total_vfs;
2610 } 2776 }
2611 } 2777 }
2612 2778
2779 /* on load remove any previous indication of internal error,
2780 * device is up.
2781 */
2782 dev->persist->state = MLX4_DEVICE_STATE_UP;
2783
2613slave_start: 2784slave_start:
2614 err = mlx4_cmd_init(dev); 2785 err = mlx4_cmd_init(dev);
2615 if (err) { 2786 if (err) {
@@ -2660,8 +2831,10 @@ slave_start:
2660 goto err_fw; 2831 goto err_fw;
2661 2832
2662 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 2833 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
2663 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, 2834 u64 dev_flags = mlx4_enable_sriov(dev, pdev,
2664 existing_vfs); 2835 total_vfs,
2836 existing_vfs,
2837 reset_flow);
2665 2838
2666 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 2839 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
2667 dev->flags = dev_flags; 2840 dev->flags = dev_flags;
@@ -2703,7 +2876,7 @@ slave_start:
2703 if (dev->flags & MLX4_FLAG_SRIOV) { 2876 if (dev->flags & MLX4_FLAG_SRIOV) {
2704 if (!existing_vfs) 2877 if (!existing_vfs)
2705 pci_disable_sriov(pdev); 2878 pci_disable_sriov(pdev);
2706 if (mlx4_is_master(dev)) 2879 if (mlx4_is_master(dev) && !reset_flow)
2707 atomic_dec(&pf_loading); 2880 atomic_dec(&pf_loading);
2708 dev->flags &= ~MLX4_FLAG_SRIOV; 2881 dev->flags &= ~MLX4_FLAG_SRIOV;
2709 } 2882 }
@@ -2717,7 +2890,8 @@ slave_start:
2717 } 2890 }
2718 2891
2719 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 2892 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
2720 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs); 2893 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
2894 existing_vfs, reset_flow);
2721 2895
2722 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { 2896 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
2723 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); 2897 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
@@ -2770,12 +2944,14 @@ slave_start:
2770 dev->caps.num_ports); 2944 dev->caps.num_ports);
2771 goto err_close; 2945 goto err_close;
2772 } 2946 }
2773 memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs)); 2947 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
2774 2948
2775 for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) { 2949 for (i = 0;
2950 i < sizeof(dev->persist->nvfs)/
2951 sizeof(dev->persist->nvfs[0]); i++) {
2776 unsigned j; 2952 unsigned j;
2777 2953
2778 for (j = 0; j < dev->nvfs[i]; ++sum, ++j) { 2954 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
2779 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; 2955 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
2780 dev->dev_vfs[sum].n_ports = i < 2 ? 1 : 2956 dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
2781 dev->caps.num_ports; 2957 dev->caps.num_ports;
@@ -2827,6 +3003,17 @@ slave_start:
2827 goto err_steer; 3003 goto err_steer;
2828 3004
2829 mlx4_init_quotas(dev); 3005 mlx4_init_quotas(dev);
3006 /* When PF resources are ready arm its comm channel to enable
3007 * getting commands
3008 */
3009 if (mlx4_is_master(dev)) {
3010 err = mlx4_ARM_COMM_CHANNEL(dev);
3011 if (err) {
3012 mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
3013 err);
3014 goto err_steer;
3015 }
3016 }
2830 3017
2831 for (port = 1; port <= dev->caps.num_ports; port++) { 3018 for (port = 1; port <= dev->caps.num_ports; port++) {
2832 err = mlx4_init_port_info(dev, port); 3019 err = mlx4_init_port_info(dev, port);
@@ -2834,6 +3021,9 @@ slave_start:
2834 goto err_port; 3021 goto err_port;
2835 } 3022 }
2836 3023
3024 priv->v2p.port1 = 1;
3025 priv->v2p.port2 = 2;
3026
2837 err = mlx4_register_device(dev); 3027 err = mlx4_register_device(dev);
2838 if (err) 3028 if (err)
2839 goto err_port; 3029 goto err_port;
@@ -2845,7 +3035,7 @@ slave_start:
2845 3035
2846 priv->removed = 0; 3036 priv->removed = 0;
2847 3037
2848 if (mlx4_is_master(dev) && dev->num_vfs) 3038 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
2849 atomic_dec(&pf_loading); 3039 atomic_dec(&pf_loading);
2850 3040
2851 kfree(dev_cap); 3041 kfree(dev_cap);
@@ -2879,8 +3069,10 @@ err_free_eq:
2879 mlx4_free_eq_table(dev); 3069 mlx4_free_eq_table(dev);
2880 3070
2881err_master_mfunc: 3071err_master_mfunc:
2882 if (mlx4_is_master(dev)) 3072 if (mlx4_is_master(dev)) {
3073 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
2883 mlx4_multi_func_cleanup(dev); 3074 mlx4_multi_func_cleanup(dev);
3075 }
2884 3076
2885 if (mlx4_is_slave(dev)) { 3077 if (mlx4_is_slave(dev)) {
2886 kfree(dev->caps.qp0_qkey); 3078 kfree(dev->caps.qp0_qkey);
@@ -2904,10 +3096,12 @@ err_cmd:
2904 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3096 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
2905 3097
2906err_sriov: 3098err_sriov:
2907 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) 3099 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
2908 pci_disable_sriov(pdev); 3100 pci_disable_sriov(pdev);
3101 dev->flags &= ~MLX4_FLAG_SRIOV;
3102 }
2909 3103
2910 if (mlx4_is_master(dev) && dev->num_vfs) 3104 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
2911 atomic_dec(&pf_loading); 3105 atomic_dec(&pf_loading);
2912 3106
2913 kfree(priv->dev.dev_vfs); 3107 kfree(priv->dev.dev_vfs);
@@ -3048,11 +3242,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
3048 } 3242 }
3049 } 3243 }
3050 3244
3051 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); 3245 err = mlx4_catas_init(&priv->dev);
3052 if (err) 3246 if (err)
3053 goto err_release_regions; 3247 goto err_release_regions;
3248
3249 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
3250 if (err)
3251 goto err_catas;
3252
3054 return 0; 3253 return 0;
3055 3254
3255err_catas:
3256 mlx4_catas_end(&priv->dev);
3257
3056err_release_regions: 3258err_release_regions:
3057 pci_release_regions(pdev); 3259 pci_release_regions(pdev);
3058 3260
@@ -3075,38 +3277,60 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
3075 return -ENOMEM; 3277 return -ENOMEM;
3076 3278
3077 dev = &priv->dev; 3279 dev = &priv->dev;
3078 dev->pdev = pdev; 3280 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
3079 pci_set_drvdata(pdev, dev); 3281 if (!dev->persist) {
3282 kfree(priv);
3283 return -ENOMEM;
3284 }
3285 dev->persist->pdev = pdev;
3286 dev->persist->dev = dev;
3287 pci_set_drvdata(pdev, dev->persist);
3080 priv->pci_dev_data = id->driver_data; 3288 priv->pci_dev_data = id->driver_data;
3289 mutex_init(&dev->persist->device_state_mutex);
3290 mutex_init(&dev->persist->interface_state_mutex);
3081 3291
3082 ret = __mlx4_init_one(pdev, id->driver_data, priv); 3292 ret = __mlx4_init_one(pdev, id->driver_data, priv);
3083 if (ret) 3293 if (ret) {
3294 kfree(dev->persist);
3084 kfree(priv); 3295 kfree(priv);
3296 } else {
3297 pci_save_state(pdev);
3298 }
3085 3299
3086 return ret; 3300 return ret;
3087} 3301}
3088 3302
3303static void mlx4_clean_dev(struct mlx4_dev *dev)
3304{
3305 struct mlx4_dev_persistent *persist = dev->persist;
3306 struct mlx4_priv *priv = mlx4_priv(dev);
3307 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);
3308
3309 memset(priv, 0, sizeof(*priv));
3310 priv->dev.persist = persist;
3311 priv->dev.flags = flags;
3312}
3313
3089static void mlx4_unload_one(struct pci_dev *pdev) 3314static void mlx4_unload_one(struct pci_dev *pdev)
3090{ 3315{
3091 struct mlx4_dev *dev = pci_get_drvdata(pdev); 3316 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3317 struct mlx4_dev *dev = persist->dev;
3092 struct mlx4_priv *priv = mlx4_priv(dev); 3318 struct mlx4_priv *priv = mlx4_priv(dev);
3093 int pci_dev_data; 3319 int pci_dev_data;
3094 int p; 3320 int p, i;
3095 int active_vfs = 0;
3096 3321
3097 if (priv->removed) 3322 if (priv->removed)
3098 return; 3323 return;
3099 3324
3325 /* saving current ports type for further use */
3326 for (i = 0; i < dev->caps.num_ports; i++) {
3327 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
3328 dev->persist->curr_port_poss_type[i] = dev->caps.
3329 possible_type[i + 1];
3330 }
3331
3100 pci_dev_data = priv->pci_dev_data; 3332 pci_dev_data = priv->pci_dev_data;
3101 3333
3102 /* Disabling SR-IOV is not allowed while there are active vf's */
3103 if (mlx4_is_master(dev)) {
3104 active_vfs = mlx4_how_many_lives_vf(dev);
3105 if (active_vfs) {
3106 pr_warn("Removing PF when there are active VF's !!\n");
3107 pr_warn("Will not disable SR-IOV.\n");
3108 }
3109 }
3110 mlx4_stop_sense(dev); 3334 mlx4_stop_sense(dev);
3111 mlx4_unregister_device(dev); 3335 mlx4_unregister_device(dev);
3112 3336
@@ -3150,12 +3374,6 @@ static void mlx4_unload_one(struct pci_dev *pdev)
3150 3374
3151 if (dev->flags & MLX4_FLAG_MSI_X) 3375 if (dev->flags & MLX4_FLAG_MSI_X)
3152 pci_disable_msix(pdev); 3376 pci_disable_msix(pdev);
3153 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
3154 mlx4_warn(dev, "Disabling SR-IOV\n");
3155 pci_disable_sriov(pdev);
3156 dev->flags &= ~MLX4_FLAG_SRIOV;
3157 dev->num_vfs = 0;
3158 }
3159 3377
3160 if (!mlx4_is_slave(dev)) 3378 if (!mlx4_is_slave(dev))
3161 mlx4_free_ownership(dev); 3379 mlx4_free_ownership(dev);
@@ -3167,42 +3385,96 @@ static void mlx4_unload_one(struct pci_dev *pdev)
3167 kfree(dev->caps.qp1_proxy); 3385 kfree(dev->caps.qp1_proxy);
3168 kfree(dev->dev_vfs); 3386 kfree(dev->dev_vfs);
3169 3387
3170 memset(priv, 0, sizeof(*priv)); 3388 mlx4_clean_dev(dev);
3171 priv->pci_dev_data = pci_dev_data; 3389 priv->pci_dev_data = pci_dev_data;
3172 priv->removed = 1; 3390 priv->removed = 1;
3173} 3391}
3174 3392
3175static void mlx4_remove_one(struct pci_dev *pdev) 3393static void mlx4_remove_one(struct pci_dev *pdev)
3176{ 3394{
3177 struct mlx4_dev *dev = pci_get_drvdata(pdev); 3395 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3396 struct mlx4_dev *dev = persist->dev;
3178 struct mlx4_priv *priv = mlx4_priv(dev); 3397 struct mlx4_priv *priv = mlx4_priv(dev);
3398 int active_vfs = 0;
3399
3400 mutex_lock(&persist->interface_state_mutex);
3401 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
3402 mutex_unlock(&persist->interface_state_mutex);
3403
3404 /* Disabling SR-IOV is not allowed while there are active vf's */
3405 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
3406 active_vfs = mlx4_how_many_lives_vf(dev);
3407 if (active_vfs) {
3408 pr_warn("Removing PF when there are active VF's !!\n");
3409 pr_warn("Will not disable SR-IOV.\n");
3410 }
3411 }
3412
3413 /* device marked to be under deletion running now without the lock
3414 * letting other tasks to be terminated
3415 */
3416 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3417 mlx4_unload_one(pdev);
3418 else
3419 mlx4_info(dev, "%s: interface is down\n", __func__);
3420 mlx4_catas_end(dev);
3421 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
3422 mlx4_warn(dev, "Disabling SR-IOV\n");
3423 pci_disable_sriov(pdev);
3424 }
3179 3425
3180 mlx4_unload_one(pdev);
3181 pci_release_regions(pdev); 3426 pci_release_regions(pdev);
3182 pci_disable_device(pdev); 3427 pci_disable_device(pdev);
3428 kfree(dev->persist);
3183 kfree(priv); 3429 kfree(priv);
3184 pci_set_drvdata(pdev, NULL); 3430 pci_set_drvdata(pdev, NULL);
3185} 3431}
3186 3432
3433static int restore_current_port_types(struct mlx4_dev *dev,
3434 enum mlx4_port_type *types,
3435 enum mlx4_port_type *poss_types)
3436{
3437 struct mlx4_priv *priv = mlx4_priv(dev);
3438 int err, i;
3439
3440 mlx4_stop_sense(dev);
3441
3442 mutex_lock(&priv->port_mutex);
3443 for (i = 0; i < dev->caps.num_ports; i++)
3444 dev->caps.possible_type[i + 1] = poss_types[i];
3445 err = mlx4_change_port_types(dev, types);
3446 mlx4_start_sense(dev);
3447 mutex_unlock(&priv->port_mutex);
3448
3449 return err;
3450}
3451
3187int mlx4_restart_one(struct pci_dev *pdev) 3452int mlx4_restart_one(struct pci_dev *pdev)
3188{ 3453{
3189 struct mlx4_dev *dev = pci_get_drvdata(pdev); 3454 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3455 struct mlx4_dev *dev = persist->dev;
3190 struct mlx4_priv *priv = mlx4_priv(dev); 3456 struct mlx4_priv *priv = mlx4_priv(dev);
3191 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3457 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3192 int pci_dev_data, err, total_vfs; 3458 int pci_dev_data, err, total_vfs;
3193 3459
3194 pci_dev_data = priv->pci_dev_data; 3460 pci_dev_data = priv->pci_dev_data;
3195 total_vfs = dev->num_vfs; 3461 total_vfs = dev->persist->num_vfs;
3196 memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs)); 3462 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
3197 3463
3198 mlx4_unload_one(pdev); 3464 mlx4_unload_one(pdev);
3199 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv); 3465 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
3200 if (err) { 3466 if (err) {
3201 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", 3467 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
3202 __func__, pci_name(pdev), err); 3468 __func__, pci_name(pdev), err);
3203 return err; 3469 return err;
3204 } 3470 }
3205 3471
3472 err = restore_current_port_types(dev, dev->persist->curr_port_type,
3473 dev->persist->curr_port_poss_type);
3474 if (err)
3475 mlx4_err(dev, "could not restore original port types (%d)\n",
3476 err);
3477
3206 return err; 3478 return err;
3207} 3479}
3208 3480
@@ -3257,23 +3529,79 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
3257static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, 3529static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
3258 pci_channel_state_t state) 3530 pci_channel_state_t state)
3259{ 3531{
3260 mlx4_unload_one(pdev); 3532 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3533
3534 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
3535 mlx4_enter_error_state(persist);
3261 3536
3262 return state == pci_channel_io_perm_failure ? 3537 mutex_lock(&persist->interface_state_mutex);
3263 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; 3538 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3539 mlx4_unload_one(pdev);
3540
3541 mutex_unlock(&persist->interface_state_mutex);
3542 if (state == pci_channel_io_perm_failure)
3543 return PCI_ERS_RESULT_DISCONNECT;
3544
3545 pci_disable_device(pdev);
3546 return PCI_ERS_RESULT_NEED_RESET;
3264} 3547}
3265 3548
3266static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 3549static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
3267{ 3550{
3268 struct mlx4_dev *dev = pci_get_drvdata(pdev); 3551 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3552 struct mlx4_dev *dev = persist->dev;
3269 struct mlx4_priv *priv = mlx4_priv(dev); 3553 struct mlx4_priv *priv = mlx4_priv(dev);
3270 int ret; 3554 int ret;
3555 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3556 int total_vfs;
3271 3557
3272 ret = __mlx4_init_one(pdev, priv->pci_dev_data, priv); 3558 mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
3559 ret = pci_enable_device(pdev);
3560 if (ret) {
3561 mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
3562 return PCI_ERS_RESULT_DISCONNECT;
3563 }
3564
3565 pci_set_master(pdev);
3566 pci_restore_state(pdev);
3567 pci_save_state(pdev);
3568
3569 total_vfs = dev->persist->num_vfs;
3570 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
3571
3572 mutex_lock(&persist->interface_state_mutex);
3573 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
3574 ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
3575 priv, 1);
3576 if (ret) {
3577 mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
3578 __func__, ret);
3579 goto end;
3580 }
3581
3582 ret = restore_current_port_types(dev, dev->persist->
3583 curr_port_type, dev->persist->
3584 curr_port_poss_type);
3585 if (ret)
3586 mlx4_err(dev, "could not restore original port types (%d)\n", ret);
3587 }
3588end:
3589 mutex_unlock(&persist->interface_state_mutex);
3273 3590
3274 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 3591 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
3275} 3592}
3276 3593
3594static void mlx4_shutdown(struct pci_dev *pdev)
3595{
3596 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3597
3598 mlx4_info(persist->dev, "mlx4_shutdown was called\n");
3599 mutex_lock(&persist->interface_state_mutex);
3600 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3601 mlx4_unload_one(pdev);
3602 mutex_unlock(&persist->interface_state_mutex);
3603}
3604
3277static const struct pci_error_handlers mlx4_err_handler = { 3605static const struct pci_error_handlers mlx4_err_handler = {
3278 .error_detected = mlx4_pci_err_detected, 3606 .error_detected = mlx4_pci_err_detected,
3279 .slot_reset = mlx4_pci_slot_reset, 3607 .slot_reset = mlx4_pci_slot_reset,
@@ -3283,7 +3611,7 @@ static struct pci_driver mlx4_driver = {
3283 .name = DRV_NAME, 3611 .name = DRV_NAME,
3284 .id_table = mlx4_pci_table, 3612 .id_table = mlx4_pci_table,
3285 .probe = mlx4_init_one, 3613 .probe = mlx4_init_one,
3286 .shutdown = mlx4_unload_one, 3614 .shutdown = mlx4_shutdown,
3287 .remove = mlx4_remove_one, 3615 .remove = mlx4_remove_one,
3288 .err_handler = &mlx4_err_handler, 3616 .err_handler = &mlx4_err_handler,
3289}; 3617};
@@ -3335,7 +3663,6 @@ static int __init mlx4_init(void)
3335 if (mlx4_verify_params()) 3663 if (mlx4_verify_params())
3336 return -EINVAL; 3664 return -EINVAL;
3337 3665
3338 mlx4_catas_init();
3339 3666
3340 mlx4_wq = create_singlethread_workqueue("mlx4"); 3667 mlx4_wq = create_singlethread_workqueue("mlx4");
3341 if (!mlx4_wq) 3668 if (!mlx4_wq)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index a3867e7ef885..bd9ea0d01aae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1318,6 +1318,9 @@ out:
1318 mutex_unlock(&priv->mcg_table.mutex); 1318 mutex_unlock(&priv->mcg_table.mutex);
1319 1319
1320 mlx4_free_cmd_mailbox(dev, mailbox); 1320 mlx4_free_cmd_mailbox(dev, mailbox);
1321 if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
1322 /* In case device is under an error, return success as a closing command */
1323 err = 0;
1321 return err; 1324 return err;
1322} 1325}
1323 1326
@@ -1347,6 +1350,9 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
1347 MLX4_CMD_WRAPPED); 1350 MLX4_CMD_WRAPPED);
1348 1351
1349 mlx4_free_cmd_mailbox(dev, mailbox); 1352 mlx4_free_cmd_mailbox(dev, mailbox);
1353 if (err && !attach &&
1354 dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
1355 err = 0;
1350 return err; 1356 return err;
1351} 1357}
1352 1358
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 210691c89b6c..1409d0cd6143 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -85,7 +85,9 @@ enum {
85 MLX4_CLR_INT_SIZE = 0x00008, 85 MLX4_CLR_INT_SIZE = 0x00008,
86 MLX4_SLAVE_COMM_BASE = 0x0, 86 MLX4_SLAVE_COMM_BASE = 0x0,
87 MLX4_COMM_PAGESIZE = 0x1000, 87 MLX4_COMM_PAGESIZE = 0x1000,
88 MLX4_CLOCK_SIZE = 0x00008 88 MLX4_CLOCK_SIZE = 0x00008,
89 MLX4_COMM_CHAN_CAPS = 0x8,
90 MLX4_COMM_CHAN_FLAGS = 0xc
89}; 91};
90 92
91enum { 93enum {
@@ -120,6 +122,10 @@ enum mlx4_mpt_state {
120}; 122};
121 123
122#define MLX4_COMM_TIME 10000 124#define MLX4_COMM_TIME 10000
125#define MLX4_COMM_OFFLINE_TIME_OUT 30000
126#define MLX4_COMM_CMD_NA_OP 0x0
127
128
123enum { 129enum {
124 MLX4_COMM_CMD_RESET, 130 MLX4_COMM_CMD_RESET,
125 MLX4_COMM_CMD_VHCR0, 131 MLX4_COMM_CMD_VHCR0,
@@ -190,6 +196,7 @@ struct mlx4_vhcr {
190struct mlx4_vhcr_cmd { 196struct mlx4_vhcr_cmd {
191 __be64 in_param; 197 __be64 in_param;
192 __be32 in_modifier; 198 __be32 in_modifier;
199 u32 reserved1;
193 __be64 out_param; 200 __be64 out_param;
194 __be16 token; 201 __be16 token;
195 u16 reserved; 202 u16 reserved;
@@ -221,19 +228,21 @@ extern int mlx4_debug_level;
221#define mlx4_dbg(mdev, format, ...) \ 228#define mlx4_dbg(mdev, format, ...) \
222do { \ 229do { \
223 if (mlx4_debug_level) \ 230 if (mlx4_debug_level) \
224 dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \ 231 dev_printk(KERN_DEBUG, \
232 &(mdev)->persist->pdev->dev, format, \
225 ##__VA_ARGS__); \ 233 ##__VA_ARGS__); \
226} while (0) 234} while (0)
227 235
228#define mlx4_err(mdev, format, ...) \ 236#define mlx4_err(mdev, format, ...) \
229 dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__) 237 dev_err(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
230#define mlx4_info(mdev, format, ...) \ 238#define mlx4_info(mdev, format, ...) \
231 dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__) 239 dev_info(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
232#define mlx4_warn(mdev, format, ...) \ 240#define mlx4_warn(mdev, format, ...) \
233 dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__) 241 dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
234 242
235extern int mlx4_log_num_mgm_entry_size; 243extern int mlx4_log_num_mgm_entry_size;
236extern int log_mtts_per_seg; 244extern int log_mtts_per_seg;
245extern int mlx4_internal_err_reset;
237 246
238#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \ 247#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \
239 MLX4_MFUNC_MAX)) 248 MLX4_MFUNC_MAX))
@@ -607,7 +616,6 @@ struct mlx4_mgm {
607struct mlx4_cmd { 616struct mlx4_cmd {
608 struct pci_pool *pool; 617 struct pci_pool *pool;
609 void __iomem *hcr; 618 void __iomem *hcr;
610 struct mutex hcr_mutex;
611 struct mutex slave_cmd_mutex; 619 struct mutex slave_cmd_mutex;
612 struct semaphore poll_sem; 620 struct semaphore poll_sem;
613 struct semaphore event_sem; 621 struct semaphore event_sem;
@@ -878,6 +886,8 @@ struct mlx4_priv {
878 int reserved_mtts; 886 int reserved_mtts;
879 int fs_hash_mode; 887 int fs_hash_mode;
880 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; 888 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
889 struct mlx4_port_map v2p; /* cached port mapping configuration */
890 struct mutex bond_mutex; /* for bond mode */
881 __be64 slave_node_guids[MLX4_MFUNC_MAX]; 891 __be64 slave_node_guids[MLX4_MFUNC_MAX];
882 892
883 atomic_t opreq_count; 893 atomic_t opreq_count;
@@ -995,7 +1005,8 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
995 1005
996void mlx4_start_catas_poll(struct mlx4_dev *dev); 1006void mlx4_start_catas_poll(struct mlx4_dev *dev);
997void mlx4_stop_catas_poll(struct mlx4_dev *dev); 1007void mlx4_stop_catas_poll(struct mlx4_dev *dev);
998void mlx4_catas_init(void); 1008int mlx4_catas_init(struct mlx4_dev *dev);
1009void mlx4_catas_end(struct mlx4_dev *dev);
999int mlx4_restart_one(struct pci_dev *pdev); 1010int mlx4_restart_one(struct pci_dev *pdev);
1000int mlx4_register_device(struct mlx4_dev *dev); 1011int mlx4_register_device(struct mlx4_dev *dev);
1001void mlx4_unregister_device(struct mlx4_dev *dev); 1012void mlx4_unregister_device(struct mlx4_dev *dev);
@@ -1161,13 +1172,14 @@ enum {
1161int mlx4_cmd_init(struct mlx4_dev *dev); 1172int mlx4_cmd_init(struct mlx4_dev *dev);
1162void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask); 1173void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask);
1163int mlx4_multi_func_init(struct mlx4_dev *dev); 1174int mlx4_multi_func_init(struct mlx4_dev *dev);
1175int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev);
1164void mlx4_multi_func_cleanup(struct mlx4_dev *dev); 1176void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
1165void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); 1177void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
1166int mlx4_cmd_use_events(struct mlx4_dev *dev); 1178int mlx4_cmd_use_events(struct mlx4_dev *dev);
1167void mlx4_cmd_use_polling(struct mlx4_dev *dev); 1179void mlx4_cmd_use_polling(struct mlx4_dev *dev);
1168 1180
1169int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, 1181int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
1170 unsigned long timeout); 1182 u16 op, unsigned long timeout);
1171 1183
1172void mlx4_cq_tasklet_cb(unsigned long data); 1184void mlx4_cq_tasklet_cb(unsigned long data);
1173void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn); 1185void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
@@ -1177,7 +1189,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
1177 1189
1178void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); 1190void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
1179 1191
1180void mlx4_handle_catas_err(struct mlx4_dev *dev); 1192void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
1181 1193
1182int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, 1194int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
1183 enum mlx4_port_type *type); 1195 enum mlx4_port_type *type);
@@ -1355,6 +1367,7 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
1355/* Returns the VF index of slave */ 1367/* Returns the VF index of slave */
1356int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); 1368int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
1357int mlx4_config_mad_demux(struct mlx4_dev *dev); 1369int mlx4_config_mad_demux(struct mlx4_dev *dev);
1370int mlx4_do_bond(struct mlx4_dev *dev, bool enable);
1358 1371
1359enum mlx4_zone_flags { 1372enum mlx4_zone_flags {
1360 MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0, 1373 MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 944a112dff37..2a8268e6be15 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -390,6 +390,7 @@ struct mlx4_en_dev {
390 struct pci_dev *pdev; 390 struct pci_dev *pdev;
391 struct mutex state_lock; 391 struct mutex state_lock;
392 struct net_device *pndev[MLX4_MAX_PORTS + 1]; 392 struct net_device *pndev[MLX4_MAX_PORTS + 1];
393 struct net_device *upper[MLX4_MAX_PORTS + 1];
393 u32 port_cnt; 394 u32 port_cnt;
394 bool device_up; 395 bool device_up;
395 struct mlx4_en_profile profile; 396 struct mlx4_en_profile profile;
@@ -410,6 +411,7 @@ struct mlx4_en_dev {
410 unsigned long overflow_period; 411 unsigned long overflow_period;
411 struct ptp_clock *ptp_clock; 412 struct ptp_clock *ptp_clock;
412 struct ptp_clock_info ptp_clock_info; 413 struct ptp_clock_info ptp_clock_info;
414 struct notifier_block nb;
413}; 415};
414 416
415 417
@@ -845,6 +847,9 @@ int mlx4_en_reset_config(struct net_device *dev,
845 struct hwtstamp_config ts_config, 847 struct hwtstamp_config ts_config,
846 netdev_features_t new_features); 848 netdev_features_t new_features);
847 849
850int mlx4_en_netdev_event(struct notifier_block *this,
851 unsigned long event, void *ptr);
852
848/* 853/*
849 * Functions for time stamping 854 * Functions for time stamping
850 */ 855 */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 7094a9c70fd5..78f51e103880 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -598,14 +598,11 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
598 if (err) 598 if (err)
599 return err; 599 return err;
600 600
601 mpt_entry->start = cpu_to_be64(mr->iova); 601 mpt_entry->start = cpu_to_be64(iova);
602 mpt_entry->length = cpu_to_be64(mr->size); 602 mpt_entry->length = cpu_to_be64(size);
603 mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); 603 mpt_entry->entity_size = cpu_to_be32(page_shift);
604 604 mpt_entry->flags &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE |
605 mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | 605 MLX4_MPT_FLAG_SW_OWNS));
606 MLX4_MPT_PD_FLAG_EN_INV);
607 mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
608 MLX4_MPT_FLAG_SW_OWNS);
609 if (mr->mtt.order < 0) { 606 if (mr->mtt.order < 0) {
610 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); 607 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
611 mpt_entry->mtt_addr = 0; 608 mpt_entry->mtt_addr = 0;
@@ -708,13 +705,13 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
708 if (!mtts) 705 if (!mtts)
709 return -ENOMEM; 706 return -ENOMEM;
710 707
711 dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle, 708 dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle,
712 npages * sizeof (u64), DMA_TO_DEVICE); 709 npages * sizeof (u64), DMA_TO_DEVICE);
713 710
714 for (i = 0; i < npages; ++i) 711 for (i = 0; i < npages; ++i)
715 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); 712 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
716 713
717 dma_sync_single_for_device(&dev->pdev->dev, dma_handle, 714 dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle,
718 npages * sizeof (u64), DMA_TO_DEVICE); 715 npages * sizeof (u64), DMA_TO_DEVICE);
719 716
720 return 0; 717 return 0;
@@ -1020,13 +1017,13 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
1020 /* Make sure MPT status is visible before writing MTT entries */ 1017 /* Make sure MPT status is visible before writing MTT entries */
1021 wmb(); 1018 wmb();
1022 1019
1023 dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle, 1020 dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle,
1024 npages * sizeof(u64), DMA_TO_DEVICE); 1021 npages * sizeof(u64), DMA_TO_DEVICE);
1025 1022
1026 for (i = 0; i < npages; ++i) 1023 for (i = 0; i < npages; ++i)
1027 fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); 1024 fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
1028 1025
1029 dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle, 1026 dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle,
1030 npages * sizeof(u64), DMA_TO_DEVICE); 1027 npages * sizeof(u64), DMA_TO_DEVICE);
1031 1028
1032 fmr->mpt->key = cpu_to_be32(key); 1029 fmr->mpt->key = cpu_to_be32(key);
@@ -1155,7 +1152,7 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_free);
1155 1152
1156int mlx4_SYNC_TPT(struct mlx4_dev *dev) 1153int mlx4_SYNC_TPT(struct mlx4_dev *dev)
1157{ 1154{
1158 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000, 1155 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
1159 MLX4_CMD_NATIVE); 1156 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1160} 1157}
1161EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); 1158EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 74216071201f..609c59dc854e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -151,11 +151,13 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
151 return -ENOMEM; 151 return -ENOMEM;
152 152
153 if (mlx4_is_slave(dev)) 153 if (mlx4_is_slave(dev))
154 offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) / 154 offset = uar->index % ((int)pci_resource_len(dev->persist->pdev,
155 2) /
155 dev->caps.uar_page_size); 156 dev->caps.uar_page_size);
156 else 157 else
157 offset = uar->index; 158 offset = uar->index;
158 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset; 159 uar->pfn = (pci_resource_start(dev->persist->pdev, 2) >> PAGE_SHIFT)
160 + offset;
159 uar->map = NULL; 161 uar->map = NULL;
160 return 0; 162 return 0;
161} 163}
@@ -212,7 +214,6 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
212 list_add(&uar->bf_list, &priv->bf_list); 214 list_add(&uar->bf_list, &priv->bf_list);
213 } 215 }
214 216
215 bf->uar = uar;
216 idx = ffz(uar->free_bf_bmap); 217 idx = ffz(uar->free_bf_bmap);
217 uar->free_bf_bmap |= 1 << idx; 218 uar->free_bf_bmap |= 1 << idx;
218 bf->uar = uar; 219 bf->uar = uar;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 30eb1ead0fe6..9f268f05290a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -553,9 +553,9 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
553 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( 553 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
554 dev, &exclusive_ports); 554 dev, &exclusive_ports);
555 slave_gid -= bitmap_weight(slaves_pport_actv.slaves, 555 slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
556 dev->num_vfs + 1); 556 dev->persist->num_vfs + 1);
557 } 557 }
558 vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; 558 vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
559 if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs)) 559 if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
560 return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1; 560 return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
561 return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs; 561 return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
@@ -590,10 +590,10 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
590 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( 590 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
591 dev, &exclusive_ports); 591 dev, &exclusive_ports);
592 slave_gid -= bitmap_weight(slaves_pport_actv.slaves, 592 slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
593 dev->num_vfs + 1); 593 dev->persist->num_vfs + 1);
594 } 594 }
595 gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; 595 gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
596 vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; 596 vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
597 if (slave_gid <= gids % vfs) 597 if (slave_gid <= gids % vfs)
598 return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1); 598 return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
599 599
@@ -644,7 +644,7 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
644 int num_eth_ports, err; 644 int num_eth_ports, err;
645 int i; 645 int i;
646 646
647 if (slave < 0 || slave > dev->num_vfs) 647 if (slave < 0 || slave > dev->persist->num_vfs)
648 return; 648 return;
649 649
650 actv_ports = mlx4_get_active_ports(dev, slave); 650 actv_ports = mlx4_get_active_ports(dev, slave);
@@ -1214,7 +1214,8 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1214 return -EINVAL; 1214 return -EINVAL;
1215 1215
1216 slaves_pport = mlx4_phys_to_slaves_pport(dev, port); 1216 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1217 num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; 1217 num_vfs = bitmap_weight(slaves_pport.slaves,
1218 dev->persist->num_vfs + 1) - 1;
1218 1219
1219 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { 1220 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1220 if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid, 1221 if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
@@ -1258,7 +1259,7 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1258 dev, &exclusive_ports); 1259 dev, &exclusive_ports);
1259 num_vfs_before += bitmap_weight( 1260 num_vfs_before += bitmap_weight(
1260 slaves_pport_actv.slaves, 1261 slaves_pport_actv.slaves,
1261 dev->num_vfs + 1); 1262 dev->persist->num_vfs + 1);
1262 } 1263 }
1263 1264
1264 /* candidate_slave_gid isn't necessarily the correct slave, but 1265 /* candidate_slave_gid isn't necessarily the correct slave, but
@@ -1288,7 +1289,7 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1288 dev, &exclusive_ports); 1289 dev, &exclusive_ports);
1289 slave_gid += bitmap_weight( 1290 slave_gid += bitmap_weight(
1290 slaves_pport_actv.slaves, 1291 slaves_pport_actv.slaves,
1291 dev->num_vfs + 1); 1292 dev->persist->num_vfs + 1);
1292 } 1293 }
1293 } 1294 }
1294 *slave_id = slave_gid; 1295 *slave_id = slave_gid;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 1586ecce13c7..2bb8553bd905 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -882,6 +882,8 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
882 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { 882 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
883 context->flags &= cpu_to_be32(~(0xf << 28)); 883 context->flags &= cpu_to_be32(~(0xf << 28));
884 context->flags |= cpu_to_be32(states[i + 1] << 28); 884 context->flags |= cpu_to_be32(states[i + 1] << 28);
885 if (states[i + 1] != MLX4_QP_STATE_RTR)
886 context->params2 &= ~MLX4_QP_BIT_FPP;
885 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], 887 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
886 context, 0, 0, qp); 888 context, 0, 0, qp);
887 if (err) { 889 if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
index ea1c6d092145..0076d88587ca 100644
--- a/drivers/net/ethernet/mellanox/mlx4/reset.c
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
@@ -76,19 +76,21 @@ int mlx4_reset(struct mlx4_dev *dev)
76 goto out; 76 goto out;
77 } 77 }
78 78
79 pcie_cap = pci_pcie_cap(dev->pdev); 79 pcie_cap = pci_pcie_cap(dev->persist->pdev);
80 80
81 for (i = 0; i < 64; ++i) { 81 for (i = 0; i < 64; ++i) {
82 if (i == 22 || i == 23) 82 if (i == 22 || i == 23)
83 continue; 83 continue;
84 if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) { 84 if (pci_read_config_dword(dev->persist->pdev, i * 4,
85 hca_header + i)) {
85 err = -ENODEV; 86 err = -ENODEV;
86 mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n"); 87 mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
87 goto out; 88 goto out;
88 } 89 }
89 } 90 }
90 91
91 reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE, 92 reset = ioremap(pci_resource_start(dev->persist->pdev, 0) +
93 MLX4_RESET_BASE,
92 MLX4_RESET_SIZE); 94 MLX4_RESET_SIZE);
93 if (!reset) { 95 if (!reset) {
94 err = -ENOMEM; 96 err = -ENOMEM;
@@ -122,8 +124,8 @@ int mlx4_reset(struct mlx4_dev *dev)
122 124
123 end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES; 125 end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
124 do { 126 do {
125 if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) && 127 if (!pci_read_config_word(dev->persist->pdev, PCI_VENDOR_ID,
126 vendor != 0xffff) 128 &vendor) && vendor != 0xffff)
127 break; 129 break;
128 130
129 msleep(1); 131 msleep(1);
@@ -138,14 +140,16 @@ int mlx4_reset(struct mlx4_dev *dev)
138 /* Now restore the PCI headers */ 140 /* Now restore the PCI headers */
139 if (pcie_cap) { 141 if (pcie_cap) {
140 devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4]; 142 devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
141 if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL, 143 if (pcie_capability_write_word(dev->persist->pdev,
144 PCI_EXP_DEVCTL,
142 devctl)) { 145 devctl)) {
143 err = -ENODEV; 146 err = -ENODEV;
144 mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n"); 147 mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
145 goto out; 148 goto out;
146 } 149 }
147 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; 150 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
148 if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL, 151 if (pcie_capability_write_word(dev->persist->pdev,
152 PCI_EXP_LNKCTL,
149 linkctl)) { 153 linkctl)) {
150 err = -ENODEV; 154 err = -ENODEV;
151 mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n"); 155 mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
@@ -157,7 +161,8 @@ int mlx4_reset(struct mlx4_dev *dev)
157 if (i * 4 == PCI_COMMAND) 161 if (i * 4 == PCI_COMMAND)
158 continue; 162 continue;
159 163
160 if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) { 164 if (pci_write_config_dword(dev->persist->pdev, i * 4,
165 hca_header[i])) {
161 err = -ENODEV; 166 err = -ENODEV;
162 mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n", 167 mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
163 i); 168 i);
@@ -165,7 +170,7 @@ int mlx4_reset(struct mlx4_dev *dev)
165 } 170 }
166 } 171 }
167 172
168 if (pci_write_config_dword(dev->pdev, PCI_COMMAND, 173 if (pci_write_config_dword(dev->persist->pdev, PCI_COMMAND,
169 hca_header[PCI_COMMAND / 4])) { 174 hca_header[PCI_COMMAND / 4])) {
170 err = -ENODEV; 175 err = -ENODEV;
171 mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n"); 176 mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 4efbd1eca611..486e3d26cd4a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -309,12 +309,13 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
309 int allocated, free, reserved, guaranteed, from_free; 309 int allocated, free, reserved, guaranteed, from_free;
310 int from_rsvd; 310 int from_rsvd;
311 311
312 if (slave > dev->num_vfs) 312 if (slave > dev->persist->num_vfs)
313 return -EINVAL; 313 return -EINVAL;
314 314
315 spin_lock(&res_alloc->alloc_lock); 315 spin_lock(&res_alloc->alloc_lock);
316 allocated = (port > 0) ? 316 allocated = (port > 0) ?
317 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] : 317 res_alloc->allocated[(port - 1) *
318 (dev->persist->num_vfs + 1) + slave] :
318 res_alloc->allocated[slave]; 319 res_alloc->allocated[slave];
319 free = (port > 0) ? res_alloc->res_port_free[port - 1] : 320 free = (port > 0) ? res_alloc->res_port_free[port - 1] :
320 res_alloc->res_free; 321 res_alloc->res_free;
@@ -352,7 +353,8 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
352 if (!err) { 353 if (!err) {
353 /* grant the request */ 354 /* grant the request */
354 if (port > 0) { 355 if (port > 0) {
355 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count; 356 res_alloc->allocated[(port - 1) *
357 (dev->persist->num_vfs + 1) + slave] += count;
356 res_alloc->res_port_free[port - 1] -= count; 358 res_alloc->res_port_free[port - 1] -= count;
357 res_alloc->res_port_rsvd[port - 1] -= from_rsvd; 359 res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
358 } else { 360 } else {
@@ -376,13 +378,14 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
376 &priv->mfunc.master.res_tracker.res_alloc[res_type]; 378 &priv->mfunc.master.res_tracker.res_alloc[res_type];
377 int allocated, guaranteed, from_rsvd; 379 int allocated, guaranteed, from_rsvd;
378 380
379 if (slave > dev->num_vfs) 381 if (slave > dev->persist->num_vfs)
380 return; 382 return;
381 383
382 spin_lock(&res_alloc->alloc_lock); 384 spin_lock(&res_alloc->alloc_lock);
383 385
384 allocated = (port > 0) ? 386 allocated = (port > 0) ?
385 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] : 387 res_alloc->allocated[(port - 1) *
388 (dev->persist->num_vfs + 1) + slave] :
386 res_alloc->allocated[slave]; 389 res_alloc->allocated[slave];
387 guaranteed = res_alloc->guaranteed[slave]; 390 guaranteed = res_alloc->guaranteed[slave];
388 391
@@ -397,7 +400,8 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
397 } 400 }
398 401
399 if (port > 0) { 402 if (port > 0) {
400 res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count; 403 res_alloc->allocated[(port - 1) *
404 (dev->persist->num_vfs + 1) + slave] -= count;
401 res_alloc->res_port_free[port - 1] += count; 405 res_alloc->res_port_free[port - 1] += count;
402 res_alloc->res_port_rsvd[port - 1] += from_rsvd; 406 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
403 } else { 407 } else {
@@ -415,7 +419,8 @@ static inline void initialize_res_quotas(struct mlx4_dev *dev,
415 enum mlx4_resource res_type, 419 enum mlx4_resource res_type,
416 int vf, int num_instances) 420 int vf, int num_instances)
417{ 421{
418 res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1)); 422 res_alloc->guaranteed[vf] = num_instances /
423 (2 * (dev->persist->num_vfs + 1));
419 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf]; 424 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
420 if (vf == mlx4_master_func_num(dev)) { 425 if (vf == mlx4_master_func_num(dev)) {
421 res_alloc->res_free = num_instances; 426 res_alloc->res_free = num_instances;
@@ -486,21 +491,26 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
486 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { 491 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
487 struct resource_allocator *res_alloc = 492 struct resource_allocator *res_alloc =
488 &priv->mfunc.master.res_tracker.res_alloc[i]; 493 &priv->mfunc.master.res_tracker.res_alloc[i];
489 res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); 494 res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
490 res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); 495 sizeof(int), GFP_KERNEL);
496 res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
497 sizeof(int), GFP_KERNEL);
491 if (i == RES_MAC || i == RES_VLAN) 498 if (i == RES_MAC || i == RES_VLAN)
492 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS * 499 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
493 (dev->num_vfs + 1) * sizeof(int), 500 (dev->persist->num_vfs
494 GFP_KERNEL); 501 + 1) *
502 sizeof(int), GFP_KERNEL);
495 else 503 else
496 res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); 504 res_alloc->allocated = kzalloc((dev->persist->
505 num_vfs + 1) *
506 sizeof(int), GFP_KERNEL);
497 507
498 if (!res_alloc->quota || !res_alloc->guaranteed || 508 if (!res_alloc->quota || !res_alloc->guaranteed ||
499 !res_alloc->allocated) 509 !res_alloc->allocated)
500 goto no_mem_err; 510 goto no_mem_err;
501 511
502 spin_lock_init(&res_alloc->alloc_lock); 512 spin_lock_init(&res_alloc->alloc_lock);
503 for (t = 0; t < dev->num_vfs + 1; t++) { 513 for (t = 0; t < dev->persist->num_vfs + 1; t++) {
504 struct mlx4_active_ports actv_ports = 514 struct mlx4_active_ports actv_ports =
505 mlx4_get_active_ports(dev, t); 515 mlx4_get_active_ports(dev, t);
506 switch (i) { 516 switch (i) {
@@ -2531,7 +2541,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2531 /* Make sure that the PD bits related to the slave id are zeros. */ 2541 /* Make sure that the PD bits related to the slave id are zeros. */
2532 pd = mr_get_pd(inbox->buf); 2542 pd = mr_get_pd(inbox->buf);
2533 pd_slave = (pd >> 17) & 0x7f; 2543 pd_slave = (pd >> 17) & 0x7f;
2534 if (pd_slave != 0 && pd_slave != slave) { 2544 if (pd_slave != 0 && --pd_slave != slave) {
2535 err = -EPERM; 2545 err = -EPERM;
2536 goto ex_abort; 2546 goto ex_abort;
2537 } 2547 }
@@ -2934,6 +2944,9 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
2934 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; 2944 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2935 optpar = be32_to_cpu(*(__be32 *) inbox->buf); 2945 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2936 2946
2947 if (slave != mlx4_master_func_num(dev))
2948 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
2949
2937 switch (qp_type) { 2950 switch (qp_type) {
2938 case MLX4_QP_ST_RC: 2951 case MLX4_QP_ST_RC:
2939 case MLX4_QP_ST_XRC: 2952 case MLX4_QP_ST_XRC:
@@ -4667,7 +4680,6 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4667 int state; 4680 int state;
4668 LIST_HEAD(tlist); 4681 LIST_HEAD(tlist);
4669 int eqn; 4682 int eqn;
4670 struct mlx4_cmd_mailbox *mailbox;
4671 4683
4672 err = move_all_busy(dev, slave, RES_EQ); 4684 err = move_all_busy(dev, slave, RES_EQ);
4673 if (err) 4685 if (err)
@@ -4693,20 +4705,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4693 break; 4705 break;
4694 4706
4695 case RES_EQ_HW: 4707 case RES_EQ_HW:
4696 mailbox = mlx4_alloc_cmd_mailbox(dev); 4708 err = mlx4_cmd(dev, slave, eqn & 0xff,
4697 if (IS_ERR(mailbox)) { 4709 1, MLX4_CMD_HW2SW_EQ,
4698 cond_resched(); 4710 MLX4_CMD_TIME_CLASS_A,
4699 continue; 4711 MLX4_CMD_NATIVE);
4700 }
4701 err = mlx4_cmd_box(dev, slave, 0,
4702 eqn & 0xff, 0,
4703 MLX4_CMD_HW2SW_EQ,
4704 MLX4_CMD_TIME_CLASS_A,
4705 MLX4_CMD_NATIVE);
4706 if (err) 4712 if (err)
4707 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", 4713 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4708 slave, eqn); 4714 slave, eqn);
4709 mlx4_free_cmd_mailbox(dev, mailbox);
4710 atomic_dec(&eq->mtt->ref_count); 4715 atomic_dec(&eq->mtt->ref_count);
4711 state = RES_EQ_RESERVED; 4716 state = RES_EQ_RESERVED;
4712 break; 4717 break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 56779c1c7811..201ca6d76ce5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -121,7 +121,7 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
121 dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, 121 dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
122 buf->direct.map); 122 buf->direct.map);
123 else { 123 else {
124 if (BITS_PER_LONG == 64 && buf->direct.buf) 124 if (BITS_PER_LONG == 64)
125 vunmap(buf->direct.buf); 125 vunmap(buf->direct.buf);
126 126
127 for (i = 0; i < buf->nbufs; i++) 127 for (i = 0; i < buf->nbufs; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 10e1f1a18255..4878025e231c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -300,11 +300,11 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
300 param = qp->pid; 300 param = qp->pid;
301 break; 301 break;
302 case QP_STATE: 302 case QP_STATE:
303 param = (u64)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28); 303 param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
304 *is_str = 1; 304 *is_str = 1;
305 break; 305 break;
306 case QP_XPORT: 306 case QP_XPORT:
307 param = (u64)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff); 307 param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
308 *is_str = 1; 308 *is_str = 1;
309 break; 309 break;
310 case QP_MTU: 310 case QP_MTU:
@@ -464,7 +464,7 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
464 464
465 465
466 if (is_str) 466 if (is_str)
467 ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)field); 467 ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
468 else 468 else
469 ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); 469 ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
470 470
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3f4525619a07..d6651937d899 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -903,12 +903,12 @@ static void remove_one(struct pci_dev *pdev)
903} 903}
904 904
905static const struct pci_device_id mlx5_core_pci_table[] = { 905static const struct pci_device_id mlx5_core_pci_table[] = {
906 { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */ 906 { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
907 { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */ 907 { PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */
908 { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */ 908 { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */
909 { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */ 909 { PCI_VDEVICE(MELLANOX, 0x1014) }, /* ConnectX-4 VF */
910 { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */ 910 { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
911 { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */ 911 { PCI_VDEVICE(MELLANOX, 0x1016) }, /* ConnectX-4LX VF */
912 { 0, } 912 { 0, }
913}; 913};
914 914
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 2fa6ae026e4f..10988fbf47eb 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4342,9 +4342,7 @@ static void ksz_init_timer(struct ksz_timer_info *info, int period,
4342{ 4342{
4343 info->max = 0; 4343 info->max = 0;
4344 info->period = period; 4344 info->period = period;
4345 init_timer(&info->timer); 4345 setup_timer(&info->timer, function, (unsigned long)data);
4346 info->timer.function = function;
4347 info->timer.data = (unsigned long) data;
4348} 4346}
4349 4347
4350static void ksz_update_timer(struct ksz_timer_info *info) 4348static void ksz_update_timer(struct ksz_timer_info *info)
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 71af98bb72cb..1412f5af05ec 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4226,8 +4226,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
4226 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); 4226 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
4227#endif 4227#endif
4228 myri10ge_free_slices(mgp); 4228 myri10ge_free_slices(mgp);
4229 if (mgp->msix_vectors != NULL) 4229 kfree(mgp->msix_vectors);
4230 kfree(mgp->msix_vectors);
4231 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 4230 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
4232 mgp->cmd, mgp->cmd_bus); 4231 mgp->cmd, mgp->cmd_bus);
4233 4232
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 2552e550a78c..eb807b0dc72a 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1122,12 +1122,12 @@ again:
1122 } 1122 }
1123 1123
1124#ifdef NS83820_VLAN_ACCEL_SUPPORT 1124#ifdef NS83820_VLAN_ACCEL_SUPPORT
1125 if(vlan_tx_tag_present(skb)) { 1125 if (skb_vlan_tag_present(skb)) {
1126 /* fetch the vlan tag info out of the 1126 /* fetch the vlan tag info out of the
1127 * ancillary data if the vlan code 1127 * ancillary data if the vlan code
1128 * is using hw vlan acceleration 1128 * is using hw vlan acceleration
1129 */ 1129 */
1130 short tag = vlan_tx_tag_get(skb); 1130 short tag = skb_vlan_tag_get(skb);
1131 extsts |= (EXTSTS_VPKT | htons(tag)); 1131 extsts |= (EXTSTS_VPKT | htons(tag));
1132 } 1132 }
1133#endif 1133#endif
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index db0c7a9aee60..a4cdf2f8041a 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4045,8 +4045,8 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4045 } 4045 }
4046 4046
4047 queue = 0; 4047 queue = 0;
4048 if (vlan_tx_tag_present(skb)) 4048 if (skb_vlan_tag_present(skb))
4049 vlan_tag = vlan_tx_tag_get(skb); 4049 vlan_tag = skb_vlan_tag_get(skb);
4050 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) { 4050 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4051 if (skb->protocol == htons(ETH_P_IP)) { 4051 if (skb->protocol == htons(ETH_P_IP)) {
4052 struct iphdr *ip; 4052 struct iphdr *ip;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 2bbd01fcb9b0..6223930a8155 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -4637,7 +4637,7 @@ static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4637 vpath->ringh = NULL; 4637 vpath->ringh = NULL;
4638 vpath->fifoh = NULL; 4638 vpath->fifoh = NULL;
4639 memset(&vpath->vpath_handles, 0, sizeof(struct list_head)); 4639 memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
4640 vpath->stats_block = 0; 4640 vpath->stats_block = NULL;
4641 vpath->hw_stats = NULL; 4641 vpath->hw_stats = NULL;
4642 vpath->hw_stats_sav = NULL; 4642 vpath->hw_stats_sav = NULL;
4643 vpath->sw_stats = NULL; 4643 vpath->sw_stats = NULL;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index cc0485e3c621..50d5604833ed 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -890,8 +890,8 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
890 dev->name, __func__, __LINE__, 890 dev->name, __func__, __LINE__,
891 fifo_hw, dtr, dtr_priv); 891 fifo_hw, dtr, dtr_priv);
892 892
893 if (vlan_tx_tag_present(skb)) { 893 if (skb_vlan_tag_present(skb)) {
894 u16 vlan_tag = vlan_tx_tag_get(skb); 894 u16 vlan_tag = skb_vlan_tag_get(skb);
895 vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag); 895 vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
896 } 896 }
897 897
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index f39cae620f61..a41bb5e6b954 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2462,9 +2462,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2462 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2462 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2463 2463
2464 /* vlan tag */ 2464 /* vlan tag */
2465 if (vlan_tx_tag_present(skb)) 2465 if (skb_vlan_tag_present(skb))
2466 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | 2466 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
2467 vlan_tx_tag_get(skb)); 2467 skb_vlan_tag_get(skb));
2468 else 2468 else
2469 start_tx->txvlan = 0; 2469 start_tx->txvlan = 0;
2470 2470
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index c531c8ae1be4..e0c31e3947d1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -176,9 +176,7 @@ netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
176static void 176static void
177netxen_free_sds_rings(struct netxen_recv_context *recv_ctx) 177netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
178{ 178{
179 if (recv_ctx->sds_rings != NULL) 179 kfree(recv_ctx->sds_rings);
180 kfree(recv_ctx->sds_rings);
181
182 recv_ctx->sds_rings = NULL; 180 recv_ctx->sds_rings = NULL;
183} 181}
184 182
@@ -1893,9 +1891,9 @@ netxen_tso_check(struct net_device *netdev,
1893 protocol = vh->h_vlan_encapsulated_proto; 1891 protocol = vh->h_vlan_encapsulated_proto;
1894 flags = FLAGS_VLAN_TAGGED; 1892 flags = FLAGS_VLAN_TAGGED;
1895 1893
1896 } else if (vlan_tx_tag_present(skb)) { 1894 } else if (skb_vlan_tag_present(skb)) {
1897 flags = FLAGS_VLAN_OOB; 1895 flags = FLAGS_VLAN_OOB;
1898 vid = vlan_tx_tag_get(skb); 1896 vid = skb_vlan_tag_get(skb);
1899 netxen_set_tx_vlan_tci(first_desc, vid); 1897 netxen_set_tx_vlan_tci(first_desc, vid);
1900 vlan_oob = 1; 1898 vlan_oob = 1;
1901 } 1899 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 4e1f58cf19ce..d4b5085a21fa 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -10,6 +10,7 @@
10#include <net/ip.h> 10#include <net/ip.h>
11#include <linux/ipv6.h> 11#include <linux/ipv6.h>
12#include <net/checksum.h> 12#include <net/checksum.h>
13#include <linux/printk.h>
13 14
14#include "qlcnic.h" 15#include "qlcnic.h"
15 16
@@ -320,8 +321,8 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
320 if (protocol == ETH_P_8021Q) { 321 if (protocol == ETH_P_8021Q) {
321 vh = (struct vlan_ethhdr *)skb->data; 322 vh = (struct vlan_ethhdr *)skb->data;
322 vlan_id = ntohs(vh->h_vlan_TCI); 323 vlan_id = ntohs(vh->h_vlan_TCI);
323 } else if (vlan_tx_tag_present(skb)) { 324 } else if (skb_vlan_tag_present(skb)) {
324 vlan_id = vlan_tx_tag_get(skb); 325 vlan_id = skb_vlan_tag_get(skb);
325 } 326 }
326 } 327 }
327 328
@@ -472,9 +473,9 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
472 flags = QLCNIC_FLAGS_VLAN_TAGGED; 473 flags = QLCNIC_FLAGS_VLAN_TAGGED;
473 vlan_tci = ntohs(vh->h_vlan_TCI); 474 vlan_tci = ntohs(vh->h_vlan_TCI);
474 protocol = ntohs(vh->h_vlan_encapsulated_proto); 475 protocol = ntohs(vh->h_vlan_encapsulated_proto);
475 } else if (vlan_tx_tag_present(skb)) { 476 } else if (skb_vlan_tag_present(skb)) {
476 flags = QLCNIC_FLAGS_VLAN_OOB; 477 flags = QLCNIC_FLAGS_VLAN_OOB;
477 vlan_tci = vlan_tx_tag_get(skb); 478 vlan_tci = skb_vlan_tag_get(skb);
478 } 479 }
479 if (unlikely(adapter->tx_pvid)) { 480 if (unlikely(adapter->tx_pvid)) {
480 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) 481 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
@@ -1473,14 +1474,14 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1473 1474
1474static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter) 1475static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
1475{ 1476{
1476 int i; 1477 if (adapter->ahw->msg_enable & NETIF_MSG_DRV) {
1477 unsigned char *data = skb->data; 1478 char prefix[30];
1478 1479
1479 pr_info(KERN_INFO "\n"); 1480 scnprintf(prefix, sizeof(prefix), "%s: %s: ",
1480 for (i = 0; i < skb->len; i++) { 1481 dev_name(&adapter->pdev->dev), __func__);
1481 QLCDB(adapter, DRV, "%02x ", data[i]); 1482
1482 if ((i & 0x0f) == 8) 1483 print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1,
1483 pr_info(KERN_INFO "\n"); 1484 skb->data, skb->len, true);
1484 } 1485 }
1485} 1486}
1486 1487
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2528c3fb6b90..a430a34a4434 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -294,9 +294,7 @@ int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
294 294
295void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx) 295void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
296{ 296{
297 if (recv_ctx->sds_rings != NULL) 297 kfree(recv_ctx->sds_rings);
298 kfree(recv_ctx->sds_rings);
299
300 recv_ctx->sds_rings = NULL; 298 recv_ctx->sds_rings = NULL;
301} 299}
302 300
@@ -1257,8 +1255,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
1257 if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) { 1255 if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
1258 if (fw_dump->tmpl_hdr == NULL || 1256 if (fw_dump->tmpl_hdr == NULL ||
1259 adapter->fw_version > prev_fw_version) { 1257 adapter->fw_version > prev_fw_version) {
1260 if (fw_dump->tmpl_hdr) 1258 vfree(fw_dump->tmpl_hdr);
1261 vfree(fw_dump->tmpl_hdr);
1262 if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) 1259 if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
1263 dev_info(&pdev->dev, 1260 dev_info(&pdev->dev,
1264 "Supports FW dump capability\n"); 1261 "Supports FW dump capability\n");
@@ -2374,13 +2371,12 @@ void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
2374 2371
2375 for (ring = 0; ring < adapter->drv_tx_rings; ring++) { 2372 for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
2376 tx_ring = &adapter->tx_ring[ring]; 2373 tx_ring = &adapter->tx_ring[ring];
2377 if (tx_ring && tx_ring->cmd_buf_arr != NULL) { 2374 if (tx_ring) {
2378 vfree(tx_ring->cmd_buf_arr); 2375 vfree(tx_ring->cmd_buf_arr);
2379 tx_ring->cmd_buf_arr = NULL; 2376 tx_ring->cmd_buf_arr = NULL;
2380 } 2377 }
2381 } 2378 }
2382 if (adapter->tx_ring != NULL) 2379 kfree(adapter->tx_ring);
2383 kfree(adapter->tx_ring);
2384} 2380}
2385 2381
2386int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, 2382int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
@@ -2758,13 +2754,9 @@ static void qlcnic_remove(struct pci_dev *pdev)
2758 } 2754 }
2759 2755
2760 qlcnic_dcb_free(adapter->dcb); 2756 qlcnic_dcb_free(adapter->dcb);
2761
2762 qlcnic_detach(adapter); 2757 qlcnic_detach(adapter);
2763 2758 kfree(adapter->npars);
2764 if (adapter->npars != NULL) 2759 kfree(adapter->eswitch);
2765 kfree(adapter->npars);
2766 if (adapter->eswitch != NULL)
2767 kfree(adapter->eswitch);
2768 2760
2769 if (qlcnic_82xx_check(adapter)) 2761 if (qlcnic_82xx_check(adapter))
2770 qlcnic_clr_all_drv_state(adapter, 0); 2762 qlcnic_clr_all_drv_state(adapter, 0);
@@ -2932,13 +2924,13 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
2932 2924
2933static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter) 2925static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
2934{ 2926{
2935 if (adapter->fhash.fmax && adapter->fhash.fhead) 2927 if (adapter->fhash.fmax)
2936 kfree(adapter->fhash.fhead); 2928 kfree(adapter->fhash.fhead);
2937 2929
2938 adapter->fhash.fhead = NULL; 2930 adapter->fhash.fhead = NULL;
2939 adapter->fhash.fmax = 0; 2931 adapter->fhash.fmax = 0;
2940 2932
2941 if (adapter->rx_fhash.fmax && adapter->rx_fhash.fhead) 2933 if (adapter->rx_fhash.fmax)
2942 kfree(adapter->rx_fhash.fhead); 2934 kfree(adapter->rx_fhash.fhead);
2943 2935
2944 adapter->rx_fhash.fmax = 0; 2936 adapter->rx_fhash.fmax = 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index c9f57fb84b9e..332bb8a3f430 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1407,8 +1407,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
1407 current_version = qlcnic_83xx_get_fw_version(adapter); 1407 current_version = qlcnic_83xx_get_fw_version(adapter);
1408 1408
1409 if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) { 1409 if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
1410 if (fw_dump->tmpl_hdr) 1410 vfree(fw_dump->tmpl_hdr);
1411 vfree(fw_dump->tmpl_hdr);
1412 if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) 1411 if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
1413 dev_info(&pdev->dev, "Supports FW dump capability\n"); 1412 dev_info(&pdev->dev, "Supports FW dump capability\n");
1414 } 1413 }
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index ef5aed3b1225..8011ef3e7707 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2666,11 +2666,11 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2666 2666
2667 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); 2667 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2668 2668
2669 if (vlan_tx_tag_present(skb)) { 2669 if (skb_vlan_tag_present(skb)) {
2670 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, 2670 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2671 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb)); 2671 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2672 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; 2672 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2673 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); 2673 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2674 } 2674 }
2675 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); 2675 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2676 if (tso < 0) { 2676 if (tso < 0) {
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 9c31e46d1eee..d79e33b3c191 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -708,8 +708,8 @@ static void cp_tx (struct cp_private *cp)
708 708
709static inline u32 cp_tx_vlan_tag(struct sk_buff *skb) 709static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
710{ 710{
711 return vlan_tx_tag_present(skb) ? 711 return skb_vlan_tag_present(skb) ?
712 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; 712 TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
713} 713}
714 714
715static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb, 715static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index fa274e0f47d7..ad0020af2193 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2073,8 +2073,8 @@ static int rtl8169_set_features(struct net_device *dev,
2073 2073
2074static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) 2074static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
2075{ 2075{
2076 return (vlan_tx_tag_present(skb)) ? 2076 return (skb_vlan_tag_present(skb)) ?
2077 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; 2077 TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
2078} 2078}
2079 2079
2080static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) 2080static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
@@ -7049,6 +7049,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7049 u32 status, len; 7049 u32 status, len;
7050 u32 opts[2]; 7050 u32 opts[2];
7051 int frags; 7051 int frags;
7052 bool stop_queue;
7052 7053
7053 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { 7054 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
7054 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); 7055 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7105,11 +7106,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7105 7106
7106 tp->cur_tx += frags + 1; 7107 tp->cur_tx += frags + 1;
7107 7108
7108 RTL_W8(TxPoll, NPQ); 7109 stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
7109 7110
7110 mmiowb(); 7111 if (!skb->xmit_more || stop_queue ||
7112 netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
7113 RTL_W8(TxPoll, NPQ);
7114
7115 mmiowb();
7116 }
7111 7117
7112 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { 7118 if (stop_queue) {
7113 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must 7119 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
7114 * not miss a ring update when it notices a stopped queue. 7120 * not miss a ring update when it notices a stopped queue.
7115 */ 7121 */
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 04283fe0e6a7..4da8bd263997 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -597,7 +597,7 @@ static struct sh_eth_cpu_data sh7757_data = {
597static void sh_eth_chip_reset_giga(struct net_device *ndev) 597static void sh_eth_chip_reset_giga(struct net_device *ndev)
598{ 598{
599 int i; 599 int i;
600 unsigned long mahr[2], malr[2]; 600 u32 mahr[2], malr[2];
601 601
602 /* save MAHR and MALR */ 602 /* save MAHR and MALR */
603 for (i = 0; i < 2; i++) { 603 for (i = 0; i < 2; i++) {
@@ -991,7 +991,7 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
991 } 991 }
992} 992}
993 993
994static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) 994static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
995{ 995{
996 if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) 996 if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
997 return EDTRR_TRNS_GETHER; 997 return EDTRR_TRNS_GETHER;
@@ -1565,7 +1565,7 @@ static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1565} 1565}
1566 1566
1567/* error control function */ 1567/* error control function */
1568static void sh_eth_error(struct net_device *ndev, int intr_status) 1568static void sh_eth_error(struct net_device *ndev, u32 intr_status)
1569{ 1569{
1570 struct sh_eth_private *mdp = netdev_priv(ndev); 1570 struct sh_eth_private *mdp = netdev_priv(ndev);
1571 u32 felic_stat; 1571 u32 felic_stat;
@@ -1678,7 +1678,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1678 struct sh_eth_private *mdp = netdev_priv(ndev); 1678 struct sh_eth_private *mdp = netdev_priv(ndev);
1679 struct sh_eth_cpu_data *cd = mdp->cd; 1679 struct sh_eth_cpu_data *cd = mdp->cd;
1680 irqreturn_t ret = IRQ_NONE; 1680 irqreturn_t ret = IRQ_NONE;
1681 unsigned long intr_status, intr_enable; 1681 u32 intr_status, intr_enable;
1682 1682
1683 spin_lock(&mdp->lock); 1683 spin_lock(&mdp->lock);
1684 1684
@@ -1709,7 +1709,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1709 __napi_schedule(&mdp->napi); 1709 __napi_schedule(&mdp->napi);
1710 } else { 1710 } else {
1711 netdev_warn(ndev, 1711 netdev_warn(ndev,
1712 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n", 1712 "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
1713 intr_status, intr_enable); 1713 intr_status, intr_enable);
1714 } 1714 }
1715 } 1715 }
@@ -1742,7 +1742,7 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
1742 napi); 1742 napi);
1743 struct net_device *ndev = napi->dev; 1743 struct net_device *ndev = napi->dev;
1744 int quota = budget; 1744 int quota = budget;
1745 unsigned long intr_status; 1745 u32 intr_status;
1746 1746
1747 for (;;) { 1747 for (;;) {
1748 intr_status = sh_eth_read(ndev, EESR); 1748 intr_status = sh_eth_read(ndev, EESR);
@@ -2133,7 +2133,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
2133 2133
2134 netif_err(mdp, timer, ndev, 2134 netif_err(mdp, timer, ndev,
2135 "transmit timed out, status %8.8x, resetting...\n", 2135 "transmit timed out, status %8.8x, resetting...\n",
2136 (int)sh_eth_read(ndev, EESR)); 2136 sh_eth_read(ndev, EESR));
2137 2137
2138 /* tx_errors count up */ 2138 /* tx_errors count up */
2139 ndev->stats.tx_errors++; 2139 ndev->stats.tx_errors++;
@@ -3019,6 +3019,36 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
3019} 3019}
3020 3020
3021#ifdef CONFIG_PM 3021#ifdef CONFIG_PM
3022#ifdef CONFIG_PM_SLEEP
3023static int sh_eth_suspend(struct device *dev)
3024{
3025 struct net_device *ndev = dev_get_drvdata(dev);
3026 int ret = 0;
3027
3028 if (netif_running(ndev)) {
3029 netif_device_detach(ndev);
3030 ret = sh_eth_close(ndev);
3031 }
3032
3033 return ret;
3034}
3035
3036static int sh_eth_resume(struct device *dev)
3037{
3038 struct net_device *ndev = dev_get_drvdata(dev);
3039 int ret = 0;
3040
3041 if (netif_running(ndev)) {
3042 ret = sh_eth_open(ndev);
3043 if (ret < 0)
3044 return ret;
3045 netif_device_attach(ndev);
3046 }
3047
3048 return ret;
3049}
3050#endif
3051
3022static int sh_eth_runtime_nop(struct device *dev) 3052static int sh_eth_runtime_nop(struct device *dev)
3023{ 3053{
3024 /* Runtime PM callback shared between ->runtime_suspend() 3054 /* Runtime PM callback shared between ->runtime_suspend()
@@ -3032,8 +3062,8 @@ static int sh_eth_runtime_nop(struct device *dev)
3032} 3062}
3033 3063
3034static const struct dev_pm_ops sh_eth_dev_pm_ops = { 3064static const struct dev_pm_ops sh_eth_dev_pm_ops = {
3035 .runtime_suspend = sh_eth_runtime_nop, 3065 SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
3036 .runtime_resume = sh_eth_runtime_nop, 3066 SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
3037}; 3067};
3038#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops) 3068#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
3039#else 3069#else
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 332d3c16d483..259d03f353e1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -459,21 +459,21 @@ struct sh_eth_cpu_data {
459 459
460 /* mandatory initialize value */ 460 /* mandatory initialize value */
461 int register_type; 461 int register_type;
462 unsigned long eesipr_value; 462 u32 eesipr_value;
463 463
464 /* optional initialize value */ 464 /* optional initialize value */
465 unsigned long ecsr_value; 465 u32 ecsr_value;
466 unsigned long ecsipr_value; 466 u32 ecsipr_value;
467 unsigned long fdr_value; 467 u32 fdr_value;
468 unsigned long fcftr_value; 468 u32 fcftr_value;
469 unsigned long rpadir_value; 469 u32 rpadir_value;
470 470
471 /* interrupt checking mask */ 471 /* interrupt checking mask */
472 unsigned long tx_check; 472 u32 tx_check;
473 unsigned long eesr_err_check; 473 u32 eesr_err_check;
474 474
475 /* Error mask */ 475 /* Error mask */
476 unsigned long trscer_err_mask; 476 u32 trscer_err_mask;
477 477
478 /* hardware features */ 478 /* hardware features */
479 unsigned long irq_flags; /* IRQ configuration flags */ 479 unsigned long irq_flags; /* IRQ configuration flags */
@@ -543,7 +543,7 @@ static inline void sh_eth_soft_swap(char *src, int len)
543#endif 543#endif
544} 544}
545 545
546static inline void sh_eth_write(struct net_device *ndev, unsigned long data, 546static inline void sh_eth_write(struct net_device *ndev, u32 data,
547 int enum_index) 547 int enum_index)
548{ 548{
549 struct sh_eth_private *mdp = netdev_priv(ndev); 549 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -551,8 +551,7 @@ static inline void sh_eth_write(struct net_device *ndev, unsigned long data,
551 iowrite32(data, mdp->addr + mdp->reg_offset[enum_index]); 551 iowrite32(data, mdp->addr + mdp->reg_offset[enum_index]);
552} 552}
553 553
554static inline unsigned long sh_eth_read(struct net_device *ndev, 554static inline u32 sh_eth_read(struct net_device *ndev, int enum_index)
555 int enum_index)
556{ 555{
557 struct sh_eth_private *mdp = netdev_priv(ndev); 556 struct sh_eth_private *mdp = netdev_priv(ndev);
558 557
@@ -565,14 +564,13 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
565 return mdp->tsu_addr + mdp->reg_offset[enum_index]; 564 return mdp->tsu_addr + mdp->reg_offset[enum_index];
566} 565}
567 566
568static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, 567static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
569 unsigned long data, int enum_index) 568 int enum_index)
570{ 569{
571 iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); 570 iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
572} 571}
573 572
574static inline unsigned long sh_eth_tsu_read(struct sh_eth_private *mdp, 573static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
575 int enum_index)
576{ 574{
577 return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); 575 return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
578} 576}
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2f398fa4b9e6..34389b6aa67c 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -806,13 +806,13 @@ static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
806 806
807static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info) 807static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
808{ 808{
809 return (void *) desc_info->desc->cookie; 809 return (void *)(uintptr_t)desc_info->desc->cookie;
810} 810}
811 811
812static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info, 812static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
813 void *ptr) 813 void *ptr)
814{ 814{
815 desc_info->desc->cookie = (long) ptr; 815 desc_info->desc->cookie = (uintptr_t) ptr;
816} 816}
817 817
818static struct rocker_desc_info * 818static struct rocker_desc_info *
@@ -3026,11 +3026,17 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
3026 container_of(work, struct rocker_fdb_learn_work, work); 3026 container_of(work, struct rocker_fdb_learn_work, work);
3027 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE); 3027 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3028 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED); 3028 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3029 struct netdev_switch_notifier_fdb_info info;
3030
3031 info.addr = lw->addr;
3032 info.vid = lw->vid;
3029 3033
3030 if (learned && removing) 3034 if (learned && removing)
3031 br_fdb_external_learn_del(lw->dev, lw->addr, lw->vid); 3035 call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
3036 lw->dev, &info.info);
3032 else if (learned && !removing) 3037 else if (learned && !removing)
3033 br_fdb_external_learn_add(lw->dev, lw->addr, lw->vid); 3038 call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
3039 lw->dev, &info.info);
3034 3040
3035 kfree(work); 3041 kfree(work);
3036} 3042}
@@ -3565,6 +3571,8 @@ nest_cancel:
3565 rocker_tlv_nest_cancel(desc_info, frags); 3571 rocker_tlv_nest_cancel(desc_info, frags);
3566out: 3572out:
3567 dev_kfree_skb(skb); 3573 dev_kfree_skb(skb);
3574 dev->stats.tx_dropped++;
3575
3568 return NETDEV_TX_OK; 3576 return NETDEV_TX_OK;
3569} 3577}
3570 3578
@@ -3668,7 +3676,8 @@ static int rocker_fdb_fill_info(struct sk_buff *skb,
3668 if (vid && nla_put_u16(skb, NDA_VLAN, vid)) 3676 if (vid && nla_put_u16(skb, NDA_VLAN, vid))
3669 goto nla_put_failure; 3677 goto nla_put_failure;
3670 3678
3671 return nlmsg_end(skb, nlh); 3679 nlmsg_end(skb, nlh);
3680 return 0;
3672 3681
3673nla_put_failure: 3682nla_put_failure:
3674 nlmsg_cancel(skb, nlh); 3683 nlmsg_cancel(skb, nlh);
@@ -3713,7 +3722,7 @@ skip:
3713} 3722}
3714 3723
3715static int rocker_port_bridge_setlink(struct net_device *dev, 3724static int rocker_port_bridge_setlink(struct net_device *dev,
3716 struct nlmsghdr *nlh) 3725 struct nlmsghdr *nlh, u16 flags)
3717{ 3726{
3718 struct rocker_port *rocker_port = netdev_priv(dev); 3727 struct rocker_port *rocker_port = netdev_priv(dev);
3719 struct nlattr *protinfo; 3728 struct nlattr *protinfo;
@@ -3824,11 +3833,145 @@ static void rocker_port_get_drvinfo(struct net_device *dev,
3824 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); 3833 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
3825} 3834}
3826 3835
3836static struct rocker_port_stats {
3837 char str[ETH_GSTRING_LEN];
3838 int type;
3839} rocker_port_stats[] = {
3840 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
3841 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
3842 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
3843 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
3844
3845 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
3846 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
3847 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
3848 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
3849};
3850
3851#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
3852
3853static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
3854 u8 *data)
3855{
3856 u8 *p = data;
3857 int i;
3858
3859 switch (stringset) {
3860 case ETH_SS_STATS:
3861 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
3862 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
3863 p += ETH_GSTRING_LEN;
3864 }
3865 break;
3866 }
3867}
3868
3869static int
3870rocker_cmd_get_port_stats_prep(struct rocker *rocker,
3871 struct rocker_port *rocker_port,
3872 struct rocker_desc_info *desc_info,
3873 void *priv)
3874{
3875 struct rocker_tlv *cmd_stats;
3876
3877 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
3878 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
3879 return -EMSGSIZE;
3880
3881 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
3882 if (!cmd_stats)
3883 return -EMSGSIZE;
3884
3885 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_LPORT,
3886 rocker_port->lport))
3887 return -EMSGSIZE;
3888
3889 rocker_tlv_nest_end(desc_info, cmd_stats);
3890
3891 return 0;
3892}
3893
3894static int
3895rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
3896 struct rocker_port *rocker_port,
3897 struct rocker_desc_info *desc_info,
3898 void *priv)
3899{
3900 struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
3901 struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
3902 struct rocker_tlv *pattr;
3903 u32 lport;
3904 u64 *data = priv;
3905 int i;
3906
3907 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
3908
3909 if (!attrs[ROCKER_TLV_CMD_INFO])
3910 return -EIO;
3911
3912 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
3913 attrs[ROCKER_TLV_CMD_INFO]);
3914
3915 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT])
3916 return -EIO;
3917
3918 lport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]);
3919 if (lport != rocker_port->lport)
3920 return -EIO;
3921
3922 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
3923 pattr = stats_attrs[rocker_port_stats[i].type];
3924 if (!pattr)
3925 continue;
3926
3927 data[i] = rocker_tlv_get_u64(pattr);
3928 }
3929
3930 return 0;
3931}
3932
3933static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
3934 void *priv)
3935{
3936 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
3937 rocker_cmd_get_port_stats_prep, NULL,
3938 rocker_cmd_get_port_stats_ethtool_proc,
3939 priv, false);
3940}
3941
3942static void rocker_port_get_stats(struct net_device *dev,
3943 struct ethtool_stats *stats, u64 *data)
3944{
3945 struct rocker_port *rocker_port = netdev_priv(dev);
3946
3947 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
3948 int i;
3949
3950 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
3951 data[i] = 0;
3952 }
3953
3954 return;
3955}
3956
3957static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
3958{
3959 switch (sset) {
3960 case ETH_SS_STATS:
3961 return ROCKER_PORT_STATS_LEN;
3962 default:
3963 return -EOPNOTSUPP;
3964 }
3965}
3966
3827static const struct ethtool_ops rocker_port_ethtool_ops = { 3967static const struct ethtool_ops rocker_port_ethtool_ops = {
3828 .get_settings = rocker_port_get_settings, 3968 .get_settings = rocker_port_get_settings,
3829 .set_settings = rocker_port_set_settings, 3969 .set_settings = rocker_port_set_settings,
3830 .get_drvinfo = rocker_port_get_drvinfo, 3970 .get_drvinfo = rocker_port_get_drvinfo,
3831 .get_link = ethtool_op_get_link, 3971 .get_link = ethtool_op_get_link,
3972 .get_strings = rocker_port_get_strings,
3973 .get_ethtool_stats = rocker_port_get_stats,
3974 .get_sset_count = rocker_port_get_sset_count,
3832}; 3975};
3833 3976
3834/***************** 3977/*****************
@@ -3850,12 +3993,22 @@ static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
3850 3993
3851 /* Cleanup tx descriptors */ 3994 /* Cleanup tx descriptors */
3852 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) { 3995 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
3996 struct sk_buff *skb;
3997
3853 err = rocker_desc_err(desc_info); 3998 err = rocker_desc_err(desc_info);
3854 if (err && net_ratelimit()) 3999 if (err && net_ratelimit())
3855 netdev_err(rocker_port->dev, "tx desc received with err %d\n", 4000 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
3856 err); 4001 err);
3857 rocker_tx_desc_frags_unmap(rocker_port, desc_info); 4002 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
3858 dev_kfree_skb_any(rocker_desc_cookie_ptr_get(desc_info)); 4003
4004 skb = rocker_desc_cookie_ptr_get(desc_info);
4005 if (err == 0) {
4006 rocker_port->dev->stats.tx_packets++;
4007 rocker_port->dev->stats.tx_bytes += skb->len;
4008 } else
4009 rocker_port->dev->stats.tx_errors++;
4010
4011 dev_kfree_skb_any(skb);
3859 credits++; 4012 credits++;
3860 } 4013 }
3861 4014
@@ -3888,6 +4041,10 @@ static int rocker_port_rx_proc(struct rocker *rocker,
3888 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]); 4041 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
3889 skb_put(skb, rx_len); 4042 skb_put(skb, rx_len);
3890 skb->protocol = eth_type_trans(skb, rocker_port->dev); 4043 skb->protocol = eth_type_trans(skb, rocker_port->dev);
4044
4045 rocker_port->dev->stats.rx_packets++;
4046 rocker_port->dev->stats.rx_bytes += skb->len;
4047
3891 netif_receive_skb(skb); 4048 netif_receive_skb(skb);
3892 4049
3893 return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info); 4050 return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
@@ -3921,6 +4078,9 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
3921 netdev_err(rocker_port->dev, "rx processing failed with err %d\n", 4078 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
3922 err); 4079 err);
3923 } 4080 }
4081 if (err)
4082 rocker_port->dev->stats.rx_errors++;
4083
3924 rocker_desc_gen_clear(desc_info); 4084 rocker_desc_gen_clear(desc_info);
3925 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info); 4085 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
3926 credits++; 4086 credits++;
@@ -4004,7 +4164,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
4004 NAPI_POLL_WEIGHT); 4164 NAPI_POLL_WEIGHT);
4005 rocker_carrier_init(rocker_port); 4165 rocker_carrier_init(rocker_port);
4006 4166
4007 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 4167 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4168 NETIF_F_HW_SWITCH_OFFLOAD;
4008 4169
4009 err = register_netdev(dev); 4170 err = register_netdev(dev);
4010 if (err) { 4171 if (err) {
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 8d2865ba634c..a5bc432feada 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -127,6 +127,9 @@ enum {
127 ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL, 127 ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL,
128 ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS, 128 ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS,
129 129
130 ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS,
131 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS,
132
130 __ROCKER_TLV_CMD_TYPE_MAX, 133 __ROCKER_TLV_CMD_TYPE_MAX,
131 ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1, 134 ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1,
132}; 135};
@@ -146,6 +149,24 @@ enum {
146 __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1, 149 __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1,
147}; 150};
148 151
152enum {
153 ROCKER_TLV_CMD_PORT_STATS_UNSPEC,
154 ROCKER_TLV_CMD_PORT_STATS_LPORT, /* u32 */
155
156 ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */
157 ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */
158 ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, /* u64 */
159 ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, /* u64 */
160
161 ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, /* u64 */
162 ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, /* u64 */
163 ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, /* u64 */
164 ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, /* u64 */
165
166 __ROCKER_TLV_CMD_PORT_STATS_MAX,
167 ROCKER_TLV_CMD_PORT_STATS_MAX = __ROCKER_TLV_CMD_PORT_STATS_MAX - 1,
168};
169
149enum rocker_port_mode { 170enum rocker_port_mode {
150 ROCKER_PORT_MODE_OF_DPA, 171 ROCKER_PORT_MODE_OF_DPA,
151}; 172};
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index b1a271853d85..c8a01ee4d25e 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -133,9 +133,8 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
133 return false; 133 return false;
134 134
135 priv->eee_active = 1; 135 priv->eee_active = 1;
136 init_timer(&priv->eee_ctrl_timer); 136 setup_timer(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer,
137 priv->eee_ctrl_timer.function = sxgbe_eee_ctrl_timer; 137 (unsigned long)priv);
138 priv->eee_ctrl_timer.data = (unsigned long)priv;
139 priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer); 138 priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
140 add_timer(&priv->eee_ctrl_timer); 139 add_timer(&priv->eee_ctrl_timer);
141 140
@@ -365,6 +364,26 @@ static int sxgbe_init_rx_buffers(struct net_device *dev,
365 364
366 return 0; 365 return 0;
367} 366}
367
368/**
369 * sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated
370 * @dev: net device structure
371 * @rx_ring: ring to be freed
372 * @rx_rsize: ring size
373 * Description: this function initializes the DMA RX descriptor
374 */
375static void sxgbe_free_rx_buffers(struct net_device *dev,
376 struct sxgbe_rx_norm_desc *p, int i,
377 unsigned int dma_buf_sz,
378 struct sxgbe_rx_queue *rx_ring)
379{
380 struct sxgbe_priv_data *priv = netdev_priv(dev);
381
382 kfree_skb(rx_ring->rx_skbuff[i]);
383 dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i],
384 dma_buf_sz, DMA_FROM_DEVICE);
385}
386
368/** 387/**
369 * init_tx_ring - init the TX descriptor ring 388 * init_tx_ring - init the TX descriptor ring
370 * @dev: net device structure 389 * @dev: net device structure
@@ -457,7 +476,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
457 /* RX ring is not allcoated */ 476 /* RX ring is not allcoated */
458 if (rx_ring == NULL) { 477 if (rx_ring == NULL) {
459 netdev_err(dev, "No memory for RX queue\n"); 478 netdev_err(dev, "No memory for RX queue\n");
460 goto error; 479 return -ENOMEM;
461 } 480 }
462 481
463 /* assign queue number */ 482 /* assign queue number */
@@ -469,23 +488,21 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
469 &rx_ring->dma_rx_phy, GFP_KERNEL); 488 &rx_ring->dma_rx_phy, GFP_KERNEL);
470 489
471 if (rx_ring->dma_rx == NULL) 490 if (rx_ring->dma_rx == NULL)
472 goto error; 491 return -ENOMEM;
473 492
474 /* allocate memory for RX skbuff array */ 493 /* allocate memory for RX skbuff array */
475 rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize, 494 rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
476 sizeof(dma_addr_t), GFP_KERNEL); 495 sizeof(dma_addr_t), GFP_KERNEL);
477 if (!rx_ring->rx_skbuff_dma) { 496 if (!rx_ring->rx_skbuff_dma) {
478 dma_free_coherent(priv->device, 497 ret = -ENOMEM;
479 rx_rsize * sizeof(struct sxgbe_rx_norm_desc), 498 goto err_free_dma_rx;
480 rx_ring->dma_rx, rx_ring->dma_rx_phy);
481 goto error;
482 } 499 }
483 500
484 rx_ring->rx_skbuff = kmalloc_array(rx_rsize, 501 rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
485 sizeof(struct sk_buff *), GFP_KERNEL); 502 sizeof(struct sk_buff *), GFP_KERNEL);
486 if (!rx_ring->rx_skbuff) { 503 if (!rx_ring->rx_skbuff) {
487 kfree(rx_ring->rx_skbuff_dma); 504 ret = -ENOMEM;
488 goto error; 505 goto err_free_skbuff_dma;
489 } 506 }
490 507
491 /* initialise the buffers */ 508 /* initialise the buffers */
@@ -495,7 +512,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
495 ret = sxgbe_init_rx_buffers(dev, p, desc_index, 512 ret = sxgbe_init_rx_buffers(dev, p, desc_index,
496 bfsize, rx_ring); 513 bfsize, rx_ring);
497 if (ret) 514 if (ret)
498 goto err_init_rx_buffers; 515 goto err_free_rx_buffers;
499 } 516 }
500 517
501 /* initalise counters */ 518 /* initalise counters */
@@ -505,11 +522,22 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
505 522
506 return 0; 523 return 0;
507 524
508err_init_rx_buffers: 525err_free_rx_buffers:
509 while (--desc_index >= 0) 526 while (--desc_index >= 0) {
510 free_rx_ring(priv->device, rx_ring, desc_index); 527 struct sxgbe_rx_norm_desc *p;
511error: 528
512 return -ENOMEM; 529 p = rx_ring->dma_rx + desc_index;
530 sxgbe_free_rx_buffers(dev, p, desc_index, bfsize, rx_ring);
531 }
532 kfree(rx_ring->rx_skbuff);
533err_free_skbuff_dma:
534 kfree(rx_ring->rx_skbuff_dma);
535err_free_dma_rx:
536 dma_free_coherent(priv->device,
537 rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
538 rx_ring->dma_rx, rx_ring->dma_rx_phy);
539
540 return ret;
513} 541}
514/** 542/**
515 * free_tx_ring - free the TX descriptor ring 543 * free_tx_ring - free the TX descriptor ring
@@ -1008,10 +1036,9 @@ static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
1008 struct sxgbe_tx_queue *p = priv->txq[queue_num]; 1036 struct sxgbe_tx_queue *p = priv->txq[queue_num];
1009 p->tx_coal_frames = SXGBE_TX_FRAMES; 1037 p->tx_coal_frames = SXGBE_TX_FRAMES;
1010 p->tx_coal_timer = SXGBE_COAL_TX_TIMER; 1038 p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
1011 init_timer(&p->txtimer); 1039 setup_timer(&p->txtimer, sxgbe_tx_timer,
1040 (unsigned long)&priv->txq[queue_num]);
1012 p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer); 1041 p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
1013 p->txtimer.data = (unsigned long)&priv->txq[queue_num];
1014 p->txtimer.function = sxgbe_tx_timer;
1015 add_timer(&p->txtimer); 1042 add_timer(&p->txtimer);
1016 } 1043 }
1017} 1044}
@@ -1273,7 +1300,7 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
1273 if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss)) 1300 if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
1274 ctxt_desc_req = 1; 1301 ctxt_desc_req = 1;
1275 1302
1276 if (unlikely(vlan_tx_tag_present(skb) || 1303 if (unlikely(skb_vlan_tag_present(skb) ||
1277 ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 1304 ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1278 tqueue->hwts_tx_en))) 1305 tqueue->hwts_tx_en)))
1279 ctxt_desc_req = 1; 1306 ctxt_desc_req = 1;
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 9468e64e6007..3e97a8b43147 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -5,8 +5,9 @@
5config NET_VENDOR_SMSC 5config NET_VENDOR_SMSC
6 bool "SMC (SMSC)/Western Digital devices" 6 bool "SMC (SMSC)/Western Digital devices"
7 default y 7 default y
8 depends on ARM || ISA || MAC || ARM64 || MIPS || M32R || SUPERH || \ 8 depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \
9 BLACKFIN || MN10300 || COLDFIRE || XTENSA || NIOS2 || PCI || PCMCIA 9 ISA || M32R || MAC || MIPS || MN10300 || NIOS2 || PCI || \
10 PCMCIA || SUPERH || XTENSA
10 ---help--- 11 ---help---
11 If you have a network (Ethernet) card belonging to this class, say Y 12 If you have a network (Ethernet) card belonging to this class, say Y
12 and read the Ethernet-HOWTO, available from 13 and read the Ethernet-HOWTO, available from
@@ -38,8 +39,9 @@ config SMC91X
38 tristate "SMC 91C9x/91C1xxx support" 39 tristate "SMC 91C9x/91C1xxx support"
39 select CRC32 40 select CRC32
40 select MII 41 select MII
41 depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \ 42 depends on !OF || GPIOLIB
42 MN10300 || COLDFIRE || ARM64 || XTENSA || NIOS2) && (!OF || GPIOLIB) 43 depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \
44 M32R || MIPS || MN10300 || NIOS2 || SUPERH || XTENSA
43 ---help--- 45 ---help---
44 This is a driver for SMC's 91x series of Ethernet chipsets, 46 This is a driver for SMC's 91x series of Ethernet chipsets,
45 including the SMC91C94 and the SMC91C111. Say Y if you want it 47 including the SMC91C94 and the SMC91C111. Say Y if you want it
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 2a38dacbbd27..be67baf5f677 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -216,6 +216,27 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
216 216
217#include <unit/smc91111.h> 217#include <unit/smc91111.h>
218 218
219#elif defined(CONFIG_ATARI)
220
221#define SMC_CAN_USE_8BIT 1
222#define SMC_CAN_USE_16BIT 1
223#define SMC_CAN_USE_32BIT 1
224#define SMC_NOWAIT 1
225
226#define SMC_inb(a, r) readb((a) + (r))
227#define SMC_inw(a, r) readw((a) + (r))
228#define SMC_inl(a, r) readl((a) + (r))
229#define SMC_outb(v, a, r) writeb(v, (a) + (r))
230#define SMC_outw(v, a, r) writew(v, (a) + (r))
231#define SMC_outl(v, a, r) writel(v, (a) + (r))
232#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
233#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
234#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
235#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
236
237#define RPC_LSA_DEFAULT RPC_LED_100_10
238#define RPC_LSB_DEFAULT RPC_LED_TX_RX
239
219#elif defined(CONFIG_ARCH_MSM) 240#elif defined(CONFIG_ARCH_MSM)
220 241
221#define SMC_CAN_USE_8BIT 0 242#define SMC_CAN_USE_8BIT 0
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index ac4d5629d905..73c2715a27f3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
6 6
7obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o 7obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
8stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o \ 8stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o \
9 dwmac-sti.o dwmac-socfpga.o 9 dwmac-sti.o dwmac-socfpga.o dwmac-rk.o
10 10
11obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o 11obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
12stmmac-pci-objs:= stmmac_pci.o 12stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
new file mode 100644
index 000000000000..6249a4ec08f0
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -0,0 +1,437 @@
1/**
2 * dwmac-rk.c - Rockchip RK3288 DWMAC specific glue layer
3 *
4 * Copyright (C) 2014 Chen-Zhi (Roger Chen)
5 *
6 * Chen-Zhi (Roger Chen) <roger.chen@rock-chips.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/stmmac.h>
20#include <linux/bitops.h>
21#include <linux/clk.h>
22#include <linux/phy.h>
23#include <linux/of_net.h>
24#include <linux/gpio.h>
25#include <linux/of_gpio.h>
26#include <linux/of_device.h>
27#include <linux/regulator/consumer.h>
28#include <linux/delay.h>
29#include <linux/mfd/syscon.h>
30#include <linux/regmap.h>
31
32struct rk_priv_data {
33 struct platform_device *pdev;
34 int phy_iface;
35 struct regulator *regulator;
36
37 bool clk_enabled;
38 bool clock_input;
39
40 struct clk *clk_mac;
41 struct clk *clk_mac_pll;
42 struct clk *gmac_clkin;
43 struct clk *mac_clk_rx;
44 struct clk *mac_clk_tx;
45 struct clk *clk_mac_ref;
46 struct clk *clk_mac_refout;
47 struct clk *aclk_mac;
48 struct clk *pclk_mac;
49
50 int tx_delay;
51 int rx_delay;
52
53 struct regmap *grf;
54};
55
56#define HIWORD_UPDATE(val, mask, shift) \
57 ((val) << (shift) | (mask) << ((shift) + 16))
58
59#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
60#define GRF_CLR_BIT(nr) (BIT(nr+16))
61
62#define RK3288_GRF_SOC_CON1 0x0248
63#define RK3288_GRF_SOC_CON3 0x0250
64#define RK3288_GRF_GPIO3D_E 0x01ec
65#define RK3288_GRF_GPIO4A_E 0x01f0
66#define RK3288_GRF_GPIO4B_E 0x01f4
67
68/*RK3288_GRF_SOC_CON1*/
69#define GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
70#define GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
71#define GMAC_FLOW_CTRL GRF_BIT(9)
72#define GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
73#define GMAC_SPEED_10M GRF_CLR_BIT(10)
74#define GMAC_SPEED_100M GRF_BIT(10)
75#define GMAC_RMII_CLK_25M GRF_BIT(11)
76#define GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
77#define GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
78#define GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
79#define GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
80#define GMAC_RMII_MODE GRF_BIT(14)
81#define GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
82
83/*RK3288_GRF_SOC_CON3*/
84#define GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
85#define GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
86#define GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
87#define GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
88#define GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
89#define GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
90
91static void set_to_rgmii(struct rk_priv_data *bsp_priv,
92 int tx_delay, int rx_delay)
93{
94 struct device *dev = &bsp_priv->pdev->dev;
95
96 if (IS_ERR(bsp_priv->grf)) {
97 dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
98 return;
99 }
100
101 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
102 GMAC_PHY_INTF_SEL_RGMII | GMAC_RMII_MODE_CLR);
103 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
104 GMAC_RXCLK_DLY_ENABLE | GMAC_TXCLK_DLY_ENABLE |
105 GMAC_CLK_RX_DL_CFG(rx_delay) |
106 GMAC_CLK_TX_DL_CFG(tx_delay));
107}
108
109static void set_to_rmii(struct rk_priv_data *bsp_priv)
110{
111 struct device *dev = &bsp_priv->pdev->dev;
112
113 if (IS_ERR(bsp_priv->grf)) {
114 dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
115 return;
116 }
117
118 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
119 GMAC_PHY_INTF_SEL_RMII | GMAC_RMII_MODE);
120}
121
122static void set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
123{
124 struct device *dev = &bsp_priv->pdev->dev;
125
126 if (IS_ERR(bsp_priv->grf)) {
127 dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
128 return;
129 }
130
131 if (speed == 10)
132 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_2_5M);
133 else if (speed == 100)
134 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_25M);
135 else if (speed == 1000)
136 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_125M);
137 else
138 dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
139}
140
141static void set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
142{
143 struct device *dev = &bsp_priv->pdev->dev;
144
145 if (IS_ERR(bsp_priv->grf)) {
146 dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
147 return;
148 }
149
150 if (speed == 10) {
151 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
152 GMAC_RMII_CLK_2_5M | GMAC_SPEED_10M);
153 } else if (speed == 100) {
154 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
155 GMAC_RMII_CLK_25M | GMAC_SPEED_100M);
156 } else {
157 dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
158 }
159}
160
161static int gmac_clk_init(struct rk_priv_data *bsp_priv)
162{
163 struct device *dev = &bsp_priv->pdev->dev;
164
165 bsp_priv->clk_enabled = false;
166
167 bsp_priv->mac_clk_rx = devm_clk_get(dev, "mac_clk_rx");
168 if (IS_ERR(bsp_priv->mac_clk_rx))
169 dev_err(dev, "%s: cannot get clock %s\n",
170 __func__, "mac_clk_rx");
171
172 bsp_priv->mac_clk_tx = devm_clk_get(dev, "mac_clk_tx");
173 if (IS_ERR(bsp_priv->mac_clk_tx))
174 dev_err(dev, "%s: cannot get clock %s\n",
175 __func__, "mac_clk_tx");
176
177 bsp_priv->aclk_mac = devm_clk_get(dev, "aclk_mac");
178 if (IS_ERR(bsp_priv->aclk_mac))
179 dev_err(dev, "%s: cannot get clock %s\n",
180 __func__, "aclk_mac");
181
182 bsp_priv->pclk_mac = devm_clk_get(dev, "pclk_mac");
183 if (IS_ERR(bsp_priv->pclk_mac))
184 dev_err(dev, "%s: cannot get clock %s\n",
185 __func__, "pclk_mac");
186
187 bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
188 if (IS_ERR(bsp_priv->clk_mac))
189 dev_err(dev, "%s: cannot get clock %s\n",
190 __func__, "stmmaceth");
191
192 if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
193 bsp_priv->clk_mac_ref = devm_clk_get(dev, "clk_mac_ref");
194 if (IS_ERR(bsp_priv->clk_mac_ref))
195 dev_err(dev, "%s: cannot get clock %s\n",
196 __func__, "clk_mac_ref");
197
198 if (!bsp_priv->clock_input) {
199 bsp_priv->clk_mac_refout =
200 devm_clk_get(dev, "clk_mac_refout");
201 if (IS_ERR(bsp_priv->clk_mac_refout))
202 dev_err(dev, "%s: cannot get clock %s\n",
203 __func__, "clk_mac_refout");
204 }
205 }
206
207 if (bsp_priv->clock_input) {
208 dev_info(dev, "%s: clock input from PHY\n", __func__);
209 } else {
210 if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
211 clk_set_rate(bsp_priv->clk_mac_pll, 50000000);
212 }
213
214 return 0;
215}
216
217static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
218{
219 int phy_iface = phy_iface = bsp_priv->phy_iface;
220
221 if (enable) {
222 if (!bsp_priv->clk_enabled) {
223 if (phy_iface == PHY_INTERFACE_MODE_RMII) {
224 if (!IS_ERR(bsp_priv->mac_clk_rx))
225 clk_prepare_enable(
226 bsp_priv->mac_clk_rx);
227
228 if (!IS_ERR(bsp_priv->clk_mac_ref))
229 clk_prepare_enable(
230 bsp_priv->clk_mac_ref);
231
232 if (!IS_ERR(bsp_priv->clk_mac_refout))
233 clk_prepare_enable(
234 bsp_priv->clk_mac_refout);
235 }
236
237 if (!IS_ERR(bsp_priv->aclk_mac))
238 clk_prepare_enable(bsp_priv->aclk_mac);
239
240 if (!IS_ERR(bsp_priv->pclk_mac))
241 clk_prepare_enable(bsp_priv->pclk_mac);
242
243 if (!IS_ERR(bsp_priv->mac_clk_tx))
244 clk_prepare_enable(bsp_priv->mac_clk_tx);
245
246 /**
247 * if (!IS_ERR(bsp_priv->clk_mac))
248 * clk_prepare_enable(bsp_priv->clk_mac);
249 */
250 mdelay(5);
251 bsp_priv->clk_enabled = true;
252 }
253 } else {
254 if (bsp_priv->clk_enabled) {
255 if (phy_iface == PHY_INTERFACE_MODE_RMII) {
256 if (!IS_ERR(bsp_priv->mac_clk_rx))
257 clk_disable_unprepare(
258 bsp_priv->mac_clk_rx);
259
260 if (!IS_ERR(bsp_priv->clk_mac_ref))
261 clk_disable_unprepare(
262 bsp_priv->clk_mac_ref);
263
264 if (!IS_ERR(bsp_priv->clk_mac_refout))
265 clk_disable_unprepare(
266 bsp_priv->clk_mac_refout);
267 }
268
269 if (!IS_ERR(bsp_priv->aclk_mac))
270 clk_disable_unprepare(bsp_priv->aclk_mac);
271
272 if (!IS_ERR(bsp_priv->pclk_mac))
273 clk_disable_unprepare(bsp_priv->pclk_mac);
274
275 if (!IS_ERR(bsp_priv->mac_clk_tx))
276 clk_disable_unprepare(bsp_priv->mac_clk_tx);
277 /**
278 * if (!IS_ERR(bsp_priv->clk_mac))
279 * clk_disable_unprepare(bsp_priv->clk_mac);
280 */
281 bsp_priv->clk_enabled = false;
282 }
283 }
284
285 return 0;
286}
287
288static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
289{
290 struct regulator *ldo = bsp_priv->regulator;
291 int ret;
292 struct device *dev = &bsp_priv->pdev->dev;
293
294 if (!ldo) {
295 dev_err(dev, "%s: no regulator found\n", __func__);
296 return -1;
297 }
298
299 if (enable) {
300 ret = regulator_enable(ldo);
301 if (ret)
302 dev_err(dev, "%s: fail to enable phy-supply\n",
303 __func__);
304 } else {
305 ret = regulator_disable(ldo);
306 if (ret)
307 dev_err(dev, "%s: fail to disable phy-supply\n",
308 __func__);
309 }
310
311 return 0;
312}
313
314static void *rk_gmac_setup(struct platform_device *pdev)
315{
316 struct rk_priv_data *bsp_priv;
317 struct device *dev = &pdev->dev;
318 int ret;
319 const char *strings = NULL;
320 int value;
321
322 bsp_priv = devm_kzalloc(dev, sizeof(*bsp_priv), GFP_KERNEL);
323 if (!bsp_priv)
324 return ERR_PTR(-ENOMEM);
325
326 bsp_priv->phy_iface = of_get_phy_mode(dev->of_node);
327
328 bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
329 if (IS_ERR(bsp_priv->regulator)) {
330 if (PTR_ERR(bsp_priv->regulator) == -EPROBE_DEFER) {
331 dev_err(dev, "phy regulator is not available yet, deferred probing\n");
332 return ERR_PTR(-EPROBE_DEFER);
333 }
334 dev_err(dev, "no regulator found\n");
335 bsp_priv->regulator = NULL;
336 }
337
338 ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
339 if (ret) {
340 dev_err(dev, "%s: Can not read property: clock_in_out.\n",
341 __func__);
342 bsp_priv->clock_input = true;
343 } else {
344 dev_info(dev, "%s: clock input or output? (%s).\n",
345 __func__, strings);
346 if (!strcmp(strings, "input"))
347 bsp_priv->clock_input = true;
348 else
349 bsp_priv->clock_input = false;
350 }
351
352 ret = of_property_read_u32(dev->of_node, "tx_delay", &value);
353 if (ret) {
354 bsp_priv->tx_delay = 0x30;
355 dev_err(dev, "%s: Can not read property: tx_delay.", __func__);
356 dev_err(dev, "%s: set tx_delay to 0x%x\n",
357 __func__, bsp_priv->tx_delay);
358 } else {
359 dev_info(dev, "%s: TX delay(0x%x).\n", __func__, value);
360 bsp_priv->tx_delay = value;
361 }
362
363 ret = of_property_read_u32(dev->of_node, "rx_delay", &value);
364 if (ret) {
365 bsp_priv->rx_delay = 0x10;
366 dev_err(dev, "%s: Can not read property: rx_delay.", __func__);
367 dev_err(dev, "%s: set rx_delay to 0x%x\n",
368 __func__, bsp_priv->rx_delay);
369 } else {
370 dev_info(dev, "%s: RX delay(0x%x).\n", __func__, value);
371 bsp_priv->rx_delay = value;
372 }
373
374 bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
375 "rockchip,grf");
376 bsp_priv->pdev = pdev;
377
378 /*rmii or rgmii*/
379 if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
380 dev_info(dev, "%s: init for RGMII\n", __func__);
381 set_to_rgmii(bsp_priv, bsp_priv->tx_delay, bsp_priv->rx_delay);
382 } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
383 dev_info(dev, "%s: init for RMII\n", __func__);
384 set_to_rmii(bsp_priv);
385 } else {
386 dev_err(dev, "%s: NO interface defined!\n", __func__);
387 }
388
389 gmac_clk_init(bsp_priv);
390
391 return bsp_priv;
392}
393
394static int rk_gmac_init(struct platform_device *pdev, void *priv)
395{
396 struct rk_priv_data *bsp_priv = priv;
397 int ret;
398
399 ret = phy_power_on(bsp_priv, true);
400 if (ret)
401 return ret;
402
403 ret = gmac_clk_enable(bsp_priv, true);
404 if (ret)
405 return ret;
406
407 return 0;
408}
409
410static void rk_gmac_exit(struct platform_device *pdev, void *priv)
411{
412 struct rk_priv_data *gmac = priv;
413
414 phy_power_on(gmac, false);
415 gmac_clk_enable(gmac, false);
416}
417
418static void rk_fix_speed(void *priv, unsigned int speed)
419{
420 struct rk_priv_data *bsp_priv = priv;
421 struct device *dev = &bsp_priv->pdev->dev;
422
423 if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII)
424 set_rgmii_speed(bsp_priv, speed);
425 else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
426 set_rmii_speed(bsp_priv, speed);
427 else
428 dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
429}
430
431const struct stmmac_of_data rk3288_gmac_data = {
432 .has_gmac = 1,
433 .fix_mac_speed = rk_fix_speed,
434 .setup = rk_gmac_setup,
435 .init = rk_gmac_init,
436 .exit = rk_gmac_exit,
437};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 056b358b4a72..bb6e2dc61bec 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -122,7 +122,7 @@ struct sti_dwmac {
122 bool ext_phyclk; /* Clock from external PHY */ 122 bool ext_phyclk; /* Clock from external PHY */
123 u32 tx_retime_src; /* TXCLK Retiming*/ 123 u32 tx_retime_src; /* TXCLK Retiming*/
124 struct clk *clk; /* PHY clock */ 124 struct clk *clk; /* PHY clock */
125 int ctrl_reg; /* GMAC glue-logic control register */ 125 u32 ctrl_reg; /* GMAC glue-logic control register */
126 int clk_sel_reg; /* GMAC ext clk selection register */ 126 int clk_sel_reg; /* GMAC ext clk selection register */
127 struct device *dev; 127 struct device *dev;
128 struct regmap *regmap; 128 struct regmap *regmap;
@@ -285,11 +285,6 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
285 if (!np) 285 if (!np)
286 return -EINVAL; 286 return -EINVAL;
287 287
288 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
289 if (!res)
290 return -ENODATA;
291 dwmac->ctrl_reg = res->start;
292
293 /* clk selection from extra syscfg register */ 288 /* clk selection from extra syscfg register */
294 dwmac->clk_sel_reg = -ENXIO; 289 dwmac->clk_sel_reg = -ENXIO;
295 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf"); 290 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf");
@@ -300,6 +295,12 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
300 if (IS_ERR(regmap)) 295 if (IS_ERR(regmap))
301 return PTR_ERR(regmap); 296 return PTR_ERR(regmap);
302 297
298 err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg);
299 if (err) {
300 dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err);
301 return err;
302 }
303
303 dwmac->dev = dev; 304 dwmac->dev = dev;
304 dwmac->interface = of_get_phy_mode(np); 305 dwmac->interface = of_get_phy_mode(np);
305 dwmac->regmap = regmap; 306 dwmac->regmap = regmap;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index cf62ff4c8c56..55e89b3838f1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1097,6 +1097,7 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1097 1097
1098 priv->dirty_tx = 0; 1098 priv->dirty_tx = 0;
1099 priv->cur_tx = 0; 1099 priv->cur_tx = 0;
1100 netdev_reset_queue(priv->dev);
1100 1101
1101 stmmac_clear_descriptors(priv); 1102 stmmac_clear_descriptors(priv);
1102 1103
@@ -1287,7 +1288,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1287 * that needs to not insert csum in the TDES. 1288 * that needs to not insert csum in the TDES.
1288 */ 1289 */
1289 priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE); 1290 priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE);
1290 tc = SF_DMA_MODE; 1291 priv->xstats.threshold = SF_DMA_MODE;
1291 } else 1292 } else
1292 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); 1293 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
1293} 1294}
@@ -1300,6 +1301,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1300static void stmmac_tx_clean(struct stmmac_priv *priv) 1301static void stmmac_tx_clean(struct stmmac_priv *priv)
1301{ 1302{
1302 unsigned int txsize = priv->dma_tx_size; 1303 unsigned int txsize = priv->dma_tx_size;
1304 unsigned int bytes_compl = 0, pkts_compl = 0;
1303 1305
1304 spin_lock(&priv->tx_lock); 1306 spin_lock(&priv->tx_lock);
1305 1307
@@ -1356,6 +1358,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1356 priv->hw->mode->clean_desc3(priv, p); 1358 priv->hw->mode->clean_desc3(priv, p);
1357 1359
1358 if (likely(skb != NULL)) { 1360 if (likely(skb != NULL)) {
1361 pkts_compl++;
1362 bytes_compl += skb->len;
1359 dev_consume_skb_any(skb); 1363 dev_consume_skb_any(skb);
1360 priv->tx_skbuff[entry] = NULL; 1364 priv->tx_skbuff[entry] = NULL;
1361 } 1365 }
@@ -1364,6 +1368,9 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1364 1368
1365 priv->dirty_tx++; 1369 priv->dirty_tx++;
1366 } 1370 }
1371
1372 netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
1373
1367 if (unlikely(netif_queue_stopped(priv->dev) && 1374 if (unlikely(netif_queue_stopped(priv->dev) &&
1368 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) { 1375 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
1369 netif_tx_lock(priv->dev); 1376 netif_tx_lock(priv->dev);
@@ -1418,6 +1425,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
1418 (i == txsize - 1)); 1425 (i == txsize - 1));
1419 priv->dirty_tx = 0; 1426 priv->dirty_tx = 0;
1420 priv->cur_tx = 0; 1427 priv->cur_tx = 0;
1428 netdev_reset_queue(priv->dev);
1421 priv->hw->dma->start_tx(priv->ioaddr); 1429 priv->hw->dma->start_tx(priv->ioaddr);
1422 1430
1423 priv->dev->stats.tx_errors++; 1431 priv->dev->stats.tx_errors++;
@@ -1444,9 +1452,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1444 } 1452 }
1445 if (unlikely(status & tx_hard_error_bump_tc)) { 1453 if (unlikely(status & tx_hard_error_bump_tc)) {
1446 /* Try to bump up the dma threshold on this failure */ 1454 /* Try to bump up the dma threshold on this failure */
1447 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) { 1455 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1456 (tc <= 256)) {
1448 tc += 64; 1457 tc += 64;
1449 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); 1458 if (priv->plat->force_thresh_dma_mode)
1459 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
1460 else
1461 priv->hw->dma->dma_mode(priv->ioaddr, tc,
1462 SF_DMA_MODE);
1450 priv->xstats.threshold = tc; 1463 priv->xstats.threshold = tc;
1451 } 1464 }
1452 } else if (unlikely(status == tx_hard_error)) 1465 } else if (unlikely(status == tx_hard_error))
@@ -2050,6 +2063,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2050 if (!priv->hwts_tx_en) 2063 if (!priv->hwts_tx_en)
2051 skb_tx_timestamp(skb); 2064 skb_tx_timestamp(skb);
2052 2065
2066 netdev_sent_queue(dev, skb->len);
2053 priv->hw->dma->enable_dma_transmission(priv->ioaddr); 2067 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2054 2068
2055 spin_unlock(&priv->tx_lock); 2069 spin_unlock(&priv->tx_lock);
@@ -2742,7 +2756,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2742 priv->plat->enh_desc = priv->dma_cap.enh_desc; 2756 priv->plat->enh_desc = priv->dma_cap.enh_desc;
2743 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; 2757 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
2744 2758
2745 priv->plat->tx_coe = priv->dma_cap.tx_coe; 2759 /* TXCOE doesn't work in thresh DMA mode */
2760 if (priv->plat->force_thresh_dma_mode)
2761 priv->plat->tx_coe = 0;
2762 else
2763 priv->plat->tx_coe = priv->dma_cap.tx_coe;
2746 2764
2747 if (priv->dma_cap.rx_coe_type2) 2765 if (priv->dma_cap.rx_coe_type2)
2748 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; 2766 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 054520d67de4..3bca908716e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -24,8 +24,50 @@
24*******************************************************************************/ 24*******************************************************************************/
25 25
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/dmi.h>
28
27#include "stmmac.h" 29#include "stmmac.h"
28 30
31/*
32 * This struct is used to associate PCI Function of MAC controller on a board,
33 * discovered via DMI, with the address of PHY connected to the MAC. The
34 * negative value of the address means that MAC controller is not connected
35 * with PHY.
36 */
37struct stmmac_pci_dmi_data {
38 const char *name;
39 unsigned int func;
40 int phy_addr;
41};
42
43struct stmmac_pci_info {
44 struct pci_dev *pdev;
45 int (*setup)(struct plat_stmmacenet_data *plat,
46 struct stmmac_pci_info *info);
47 struct stmmac_pci_dmi_data *dmi;
48};
49
50static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
51{
52 const char *name = dmi_get_system_info(DMI_BOARD_NAME);
53 unsigned int func = PCI_FUNC(info->pdev->devfn);
54 struct stmmac_pci_dmi_data *dmi;
55
56 /*
57 * Galileo boards with old firmware don't support DMI. We always return
58 * 1 here, so at least first found MAC controller would be probed.
59 */
60 if (!name)
61 return 1;
62
63 for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) {
64 if (!strcmp(dmi->name, name) && dmi->func == func)
65 return dmi->phy_addr;
66 }
67
68 return -ENODEV;
69}
70
29static void stmmac_default_data(struct plat_stmmacenet_data *plat) 71static void stmmac_default_data(struct plat_stmmacenet_data *plat)
30{ 72{
31 plat->bus_id = 1; 73 plat->bus_id = 1;
@@ -48,6 +90,62 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
48 plat->unicast_filter_entries = 1; 90 plat->unicast_filter_entries = 1;
49} 91}
50 92
93static int quark_default_data(struct plat_stmmacenet_data *plat,
94 struct stmmac_pci_info *info)
95{
96 struct pci_dev *pdev = info->pdev;
97 int ret;
98
99 /*
100 * Refuse to load the driver and register net device if MAC controller
101 * does not connect to any PHY interface.
102 */
103 ret = stmmac_pci_find_phy_addr(info);
104 if (ret < 0)
105 return ret;
106
107 plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
108 plat->phy_addr = ret;
109 plat->interface = PHY_INTERFACE_MODE_RMII;
110 plat->clk_csr = 2;
111 plat->has_gmac = 1;
112 plat->force_sf_dma_mode = 1;
113
114 plat->mdio_bus_data->phy_reset = NULL;
115 plat->mdio_bus_data->phy_mask = 0;
116
117 plat->dma_cfg->pbl = 16;
118 plat->dma_cfg->burst_len = DMA_AXI_BLEN_256;
119 plat->dma_cfg->fixed_burst = 1;
120
121 /* Set default value for multicast hash bins */
122 plat->multicast_filter_bins = HASH_TABLE_SIZE;
123
124 /* Set default value for unicast filter entries */
125 plat->unicast_filter_entries = 1;
126
127 return 0;
128}
129
130static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = {
131 {
132 .name = "Galileo",
133 .func = 6,
134 .phy_addr = 1,
135 },
136 {
137 .name = "GalileoGen2",
138 .func = 6,
139 .phy_addr = 1,
140 },
141 {}
142};
143
144static struct stmmac_pci_info quark_pci_info = {
145 .setup = quark_default_data,
146 .dmi = quark_pci_dmi_data,
147};
148
51/** 149/**
52 * stmmac_pci_probe 150 * stmmac_pci_probe
53 * 151 *
@@ -63,6 +161,7 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
63static int stmmac_pci_probe(struct pci_dev *pdev, 161static int stmmac_pci_probe(struct pci_dev *pdev,
64 const struct pci_device_id *id) 162 const struct pci_device_id *id)
65{ 163{
164 struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
66 struct plat_stmmacenet_data *plat; 165 struct plat_stmmacenet_data *plat;
67 struct stmmac_priv *priv; 166 struct stmmac_priv *priv;
68 int i; 167 int i;
@@ -103,7 +202,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
103 202
104 pci_set_master(pdev); 203 pci_set_master(pdev);
105 204
106 stmmac_default_data(plat); 205 if (info) {
206 info->pdev = pdev;
207 if (info->setup) {
208 ret = info->setup(plat, info);
209 if (ret)
210 return ret;
211 }
212 } else
213 stmmac_default_data(plat);
214
215 pci_enable_msi(pdev);
107 216
108 priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]); 217 priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]);
109 if (IS_ERR(priv)) { 218 if (IS_ERR(priv)) {
@@ -155,11 +264,13 @@ static int stmmac_pci_resume(struct device *dev)
155static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); 264static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
156 265
157#define STMMAC_VENDOR_ID 0x700 266#define STMMAC_VENDOR_ID 0x700
267#define STMMAC_QUARK_ID 0x0937
158#define STMMAC_DEVICE_ID 0x1108 268#define STMMAC_DEVICE_ID 0x1108
159 269
160static const struct pci_device_id stmmac_id_table[] = { 270static const struct pci_device_id stmmac_id_table[] = {
161 {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, 271 {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)},
162 {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)}, 272 {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)},
273 {PCI_VDEVICE(INTEL, STMMAC_QUARK_ID), (kernel_ulong_t)&quark_pci_info},
163 {} 274 {}
164}; 275};
165 276
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3039de2465ba..fb846ebba1d9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -33,6 +33,7 @@
33 33
34static const struct of_device_id stmmac_dt_ids[] = { 34static const struct of_device_id stmmac_dt_ids[] = {
35 /* SoC specific glue layers should come before generic bindings */ 35 /* SoC specific glue layers should come before generic bindings */
36 { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
36 { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data}, 37 { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
37 { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, 38 { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
38 { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data}, 39 { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
@@ -234,6 +235,9 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
234 of_property_read_bool(np, "snps,fixed-burst"); 235 of_property_read_bool(np, "snps,fixed-burst");
235 dma_cfg->mixed_burst = 236 dma_cfg->mixed_burst =
236 of_property_read_bool(np, "snps,mixed-burst"); 237 of_property_read_bool(np, "snps,mixed-burst");
238 of_property_read_u32(np, "snps,burst_len", &dma_cfg->burst_len);
239 if (dma_cfg->burst_len < 0 || dma_cfg->burst_len > 256)
240 dma_cfg->burst_len = 0;
237 } 241 }
238 plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); 242 plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
239 if (plat->force_thresh_dma_mode) { 243 if (plat->force_thresh_dma_mode) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 25dd1f7ace02..093eb99e5ffd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -24,5 +24,6 @@ extern const struct stmmac_of_data sun7i_gmac_data;
24extern const struct stmmac_of_data stih4xx_dwmac_data; 24extern const struct stmmac_of_data stih4xx_dwmac_data;
25extern const struct stmmac_of_data stid127_dwmac_data; 25extern const struct stmmac_of_data stid127_dwmac_data;
26extern const struct stmmac_of_data socfpga_gmac_data; 26extern const struct stmmac_of_data socfpga_gmac_data;
27extern const struct stmmac_of_data rk3288_gmac_data;
27 28
28#endif /* __STMMAC_PLATFORM_H__ */ 29#endif /* __STMMAC_PLATFORM_H__ */
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0c6416213837..4b51f903fb73 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3341,8 +3341,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
3341 3341
3342 niu_hash_page(rp, page, addr); 3342 niu_hash_page(rp, page, addr);
3343 if (rp->rbr_blocks_per_page > 1) 3343 if (rp->rbr_blocks_per_page > 1)
3344 atomic_add(rp->rbr_blocks_per_page - 1, 3344 atomic_add(rp->rbr_blocks_per_page - 1, &page->_count);
3345 &compound_head(page)->_count);
3346 3345
3347 for (i = 0; i < rp->rbr_blocks_per_page; i++) { 3346 for (i = 0; i < rp->rbr_blocks_per_page; i++) {
3348 __le32 *rbr = &rp->rbr[start_index + i]; 3347 __le32 *rbr = &rp->rbr[start_index + i];
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 3699b98d5b2c..2b10b85d8a08 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -50,6 +50,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
50#define VNET_MAX_RETRIES 10 50#define VNET_MAX_RETRIES 10
51 51
52static int __vnet_tx_trigger(struct vnet_port *port, u32 start); 52static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
53static void vnet_port_reset(struct vnet_port *port);
53 54
54/* Ordered from largest major to lowest */ 55/* Ordered from largest major to lowest */
55static struct vio_version vnet_versions[] = { 56static struct vio_version vnet_versions[] = {
@@ -351,10 +352,15 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
351 unsigned int len = desc->size; 352 unsigned int len = desc->size;
352 unsigned int copy_len; 353 unsigned int copy_len;
353 struct sk_buff *skb; 354 struct sk_buff *skb;
355 int maxlen;
354 int err; 356 int err;
355 357
356 err = -EMSGSIZE; 358 err = -EMSGSIZE;
357 if (unlikely(len < ETH_ZLEN || len > port->rmtu)) { 359 if (port->tso && port->tsolen > port->rmtu)
360 maxlen = port->tsolen;
361 else
362 maxlen = port->rmtu;
363 if (unlikely(len < ETH_ZLEN || len > maxlen)) {
358 dev->stats.rx_length_errors++; 364 dev->stats.rx_length_errors++;
359 goto out_dropped; 365 goto out_dropped;
360 } 366 }
@@ -731,9 +737,7 @@ ldc_ctrl:
731 vio_link_state_change(vio, event); 737 vio_link_state_change(vio, event);
732 738
733 if (event == LDC_EVENT_RESET) { 739 if (event == LDC_EVENT_RESET) {
734 port->rmtu = 0; 740 vnet_port_reset(port);
735 port->tso = true;
736 port->tsolen = 0;
737 vio_port_up(vio); 741 vio_port_up(vio);
738 } 742 }
739 port->rx_event = 0; 743 port->rx_event = 0;
@@ -929,36 +933,36 @@ static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
929 933
930 *pending = 0; 934 *pending = 0;
931 935
932 txi = dr->prod-1; 936 txi = dr->prod;
933 if (txi < 0)
934 txi = VNET_TX_RING_SIZE-1;
935
936 for (i = 0; i < VNET_TX_RING_SIZE; ++i) { 937 for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
937 struct vio_net_desc *d; 938 struct vio_net_desc *d;
938 939
939 d = vio_dring_entry(dr, txi); 940 --txi;
940 941 if (txi < 0)
941 if (d->hdr.state == VIO_DESC_DONE) { 942 txi = VNET_TX_RING_SIZE-1;
942 if (port->tx_bufs[txi].skb) {
943 BUG_ON(port->tx_bufs[txi].skb->next);
944 943
945 port->tx_bufs[txi].skb->next = skb; 944 d = vio_dring_entry(dr, txi);
946 skb = port->tx_bufs[txi].skb;
947 port->tx_bufs[txi].skb = NULL;
948 945
949 ldc_unmap(port->vio.lp, 946 if (d->hdr.state == VIO_DESC_READY) {
950 port->tx_bufs[txi].cookies,
951 port->tx_bufs[txi].ncookies);
952 }
953 d->hdr.state = VIO_DESC_FREE;
954 } else if (d->hdr.state == VIO_DESC_READY) {
955 (*pending)++; 947 (*pending)++;
956 } else if (d->hdr.state == VIO_DESC_FREE) { 948 continue;
957 break;
958 } 949 }
959 --txi; 950 if (port->tx_bufs[txi].skb) {
960 if (txi < 0) 951 if (d->hdr.state != VIO_DESC_DONE)
961 txi = VNET_TX_RING_SIZE-1; 952 pr_notice("invalid ring buffer state %d\n",
953 d->hdr.state);
954 BUG_ON(port->tx_bufs[txi].skb->next);
955
956 port->tx_bufs[txi].skb->next = skb;
957 skb = port->tx_bufs[txi].skb;
958 port->tx_bufs[txi].skb = NULL;
959
960 ldc_unmap(port->vio.lp,
961 port->tx_bufs[txi].cookies,
962 port->tx_bufs[txi].ncookies);
963 } else if (d->hdr.state == VIO_DESC_FREE)
964 break;
965 d->hdr.state = VIO_DESC_FREE;
962 } 966 }
963 return skb; 967 return skb;
964} 968}
@@ -1633,16 +1637,9 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
1633 int i; 1637 int i;
1634 1638
1635 dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 1639 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1636 if (dr->base) { 1640
1637 ldc_free_exp_dring(port->vio.lp, dr->base, 1641 if (dr->base == NULL)
1638 (dr->entry_size * dr->num_entries), 1642 return;
1639 dr->cookies, dr->ncookies);
1640 dr->base = NULL;
1641 dr->entry_size = 0;
1642 dr->num_entries = 0;
1643 dr->pending = 0;
1644 dr->ncookies = 0;
1645 }
1646 1643
1647 for (i = 0; i < VNET_TX_RING_SIZE; i++) { 1644 for (i = 0; i < VNET_TX_RING_SIZE; i++) {
1648 struct vio_net_desc *d; 1645 struct vio_net_desc *d;
@@ -1652,8 +1649,6 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
1652 continue; 1649 continue;
1653 1650
1654 d = vio_dring_entry(dr, i); 1651 d = vio_dring_entry(dr, i);
1655 if (d->hdr.state == VIO_DESC_READY)
1656 pr_warn("active transmit buffers freed\n");
1657 1652
1658 ldc_unmap(port->vio.lp, 1653 ldc_unmap(port->vio.lp,
1659 port->tx_bufs[i].cookies, 1654 port->tx_bufs[i].cookies,
@@ -1662,6 +1657,23 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
1662 port->tx_bufs[i].skb = NULL; 1657 port->tx_bufs[i].skb = NULL;
1663 d->hdr.state = VIO_DESC_FREE; 1658 d->hdr.state = VIO_DESC_FREE;
1664 } 1659 }
1660 ldc_free_exp_dring(port->vio.lp, dr->base,
1661 (dr->entry_size * dr->num_entries),
1662 dr->cookies, dr->ncookies);
1663 dr->base = NULL;
1664 dr->entry_size = 0;
1665 dr->num_entries = 0;
1666 dr->pending = 0;
1667 dr->ncookies = 0;
1668}
1669
1670static void vnet_port_reset(struct vnet_port *port)
1671{
1672 del_timer(&port->clean_timer);
1673 vnet_port_free_tx_bufs(port);
1674 port->rmtu = 0;
1675 port->tso = true;
1676 port->tsolen = 0;
1665} 1677}
1666 1678
1667static int vnet_port_alloc_tx_ring(struct vnet_port *port) 1679static int vnet_port_alloc_tx_ring(struct vnet_port *port)
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 6ab36d9ff2ab..a9cac8413e49 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1650,9 +1650,9 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
1650 txd_mss); 1650 txd_mss);
1651 } 1651 }
1652 1652
1653 if (vlan_tx_tag_present(skb)) { 1653 if (skb_vlan_tag_present(skb)) {
1654 /*Cut VLAN ID to 12 bits */ 1654 /*Cut VLAN ID to 12 bits */
1655 txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12); 1655 txd_vlan_id = skb_vlan_tag_get(skb) & BITS_MASK(12);
1656 txd_vtag = 1; 1656 txd_vtag = 1;
1657 } 1657 }
1658 1658
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 605dd909bcc3..3bc992cd70b7 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -56,12 +56,18 @@ config TI_CPSW_PHY_SEL
56 This driver supports configuring of the phy mode connected to 56 This driver supports configuring of the phy mode connected to
57 the CPSW. 57 the CPSW.
58 58
59config TI_CPSW_ALE
60 tristate "TI CPSW ALE Support"
61 ---help---
62 This driver supports TI's CPSW ALE module.
63
59config TI_CPSW 64config TI_CPSW
60 tristate "TI CPSW Switch Support" 65 tristate "TI CPSW Switch Support"
61 depends on ARCH_DAVINCI || ARCH_OMAP2PLUS 66 depends on ARCH_DAVINCI || ARCH_OMAP2PLUS
62 select TI_DAVINCI_CPDMA 67 select TI_DAVINCI_CPDMA
63 select TI_DAVINCI_MDIO 68 select TI_DAVINCI_MDIO
64 select TI_CPSW_PHY_SEL 69 select TI_CPSW_PHY_SEL
70 select TI_CPSW_ALE
65 select MFD_SYSCON 71 select MFD_SYSCON
66 select REGMAP 72 select REGMAP
67 ---help--- 73 ---help---
@@ -79,6 +85,25 @@ config TI_CPTS
79 the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4 85 the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4
80 and Layer 2 packets, and the driver offers a PTP Hardware Clock. 86 and Layer 2 packets, and the driver offers a PTP Hardware Clock.
81 87
88config TI_KEYSTONE_NETCP
89 tristate "TI Keystone NETCP Core Support"
90 select TI_CPSW_ALE
91 depends on OF
92 depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
93 ---help---
94 This driver supports TI's Keystone NETCP Core.
95
96 To compile this driver as a module, choose M here: the module
97 will be called keystone_netcp.
98
99config TI_KEYSTONE_NETCP_ETHSS
100 depends on TI_KEYSTONE_NETCP
101 tristate "TI Keystone NETCP Ethernet subsystem Support"
102 ---help---
103
104 To compile this driver as a module, choose M here: the module
105 will be called keystone_netcp_ethss.
106
82config TLAN 107config TLAN
83 tristate "TI ThunderLAN support" 108 tristate "TI ThunderLAN support"
84 depends on (PCI || EISA) 109 depends on (PCI || EISA)
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 9cfaab8152be..d420d9413e4a 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -2,11 +2,20 @@
2# Makefile for the TI network device drivers. 2# Makefile for the TI network device drivers.
3# 3#
4 4
5obj-$(CONFIG_TI_CPSW) += cpsw-common.o
6obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
7
5obj-$(CONFIG_TLAN) += tlan.o 8obj-$(CONFIG_TLAN) += tlan.o
6obj-$(CONFIG_CPMAC) += cpmac.o 9obj-$(CONFIG_CPMAC) += cpmac.o
7obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o 10obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
8obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o 11obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
9obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o 12obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
10obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o 13obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
14obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
11obj-$(CONFIG_TI_CPSW) += ti_cpsw.o 15obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
12ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o 16ti_cpsw-y := cpsw.o cpts.o
17
18obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
19keystone_netcp-y := netcp_core.o
20obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
21keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
new file mode 100644
index 000000000000..f59509486113
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -0,0 +1,55 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/of.h>
16#include <linux/of_device.h>
17#include <linux/regmap.h>
18#include <linux/mfd/syscon.h>
19
20#include "cpsw.h"
21
22#define AM33XX_CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id))
23#define AM33XX_CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4)
24
25int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
26 u8 *mac_addr)
27{
28 u32 macid_lo;
29 u32 macid_hi;
30 struct regmap *syscon;
31
32 syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
33 if (IS_ERR(syscon)) {
34 if (PTR_ERR(syscon) == -ENODEV)
35 return 0;
36 return PTR_ERR(syscon);
37 }
38
39 regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(offset, slave),
40 &macid_lo);
41 regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(offset, slave),
42 &macid_hi);
43
44 mac_addr[5] = (macid_lo >> 8) & 0xff;
45 mac_addr[4] = macid_lo & 0xff;
46 mac_addr[3] = (macid_hi >> 24) & 0xff;
47 mac_addr[2] = (macid_hi >> 16) & 0xff;
48 mac_addr[1] = (macid_hi >> 8) & 0xff;
49 mac_addr[0] = macid_hi & 0xff;
50
51 return 0;
52}
53EXPORT_SYMBOL_GPL(cpsw_am33xx_cm_get_macid);
54
55MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a39131f494ec..7d8dd0d2182e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -33,8 +33,6 @@
33#include <linux/of_net.h> 33#include <linux/of_net.h>
34#include <linux/of_device.h> 34#include <linux/of_device.h>
35#include <linux/if_vlan.h> 35#include <linux/if_vlan.h>
36#include <linux/mfd/syscon.h>
37#include <linux/regmap.h>
38 36
39#include <linux/pinctrl/consumer.h> 37#include <linux/pinctrl/consumer.h>
40 38
@@ -761,17 +759,25 @@ requeue:
761 dev_kfree_skb_any(new_skb); 759 dev_kfree_skb_any(new_skb);
762} 760}
763 761
764static irqreturn_t cpsw_interrupt(int irq, void *dev_id) 762static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
765{ 763{
766 struct cpsw_priv *priv = dev_id; 764 struct cpsw_priv *priv = dev_id;
767 int value = irq - priv->irqs_table[0];
768 765
769 /* NOTICE: Ending IRQ here. The trick with the 'value' variable above 766 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
770 * is to make sure we will always write the correct value to the EOI 767 cpdma_chan_process(priv->txch, 128);
771 * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2 768
772 * for TX Interrupt and 3 for MISC Interrupt. 769 priv = cpsw_get_slave_priv(priv, 1);
773 */ 770 if (priv)
774 cpdma_ctlr_eoi(priv->dma, value); 771 cpdma_chan_process(priv->txch, 128);
772
773 return IRQ_HANDLED;
774}
775
776static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
777{
778 struct cpsw_priv *priv = dev_id;
779
780 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
775 781
776 cpsw_intr_disable(priv); 782 cpsw_intr_disable(priv);
777 if (priv->irq_enabled == true) { 783 if (priv->irq_enabled == true) {
@@ -1624,7 +1630,8 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
1624 1630
1625 cpsw_intr_disable(priv); 1631 cpsw_intr_disable(priv);
1626 cpdma_ctlr_int_ctrl(priv->dma, false); 1632 cpdma_ctlr_int_ctrl(priv->dma, false);
1627 cpsw_interrupt(ndev->irq, priv); 1633 cpsw_rx_interrupt(priv->irqs_table[0], priv);
1634 cpsw_tx_interrupt(priv->irqs_table[1], priv);
1628 cpdma_ctlr_int_ctrl(priv->dma, true); 1635 cpdma_ctlr_int_ctrl(priv->dma, true);
1629 cpsw_intr_enable(priv); 1636 cpsw_intr_enable(priv);
1630} 1637}
@@ -1927,36 +1934,6 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
1927 slave->port_vlan = data->dual_emac_res_vlan; 1934 slave->port_vlan = data->dual_emac_res_vlan;
1928} 1935}
1929 1936
1930#define AM33XX_CTRL_MAC_LO_REG(id) (0x630 + 0x8 * id)
1931#define AM33XX_CTRL_MAC_HI_REG(id) (0x630 + 0x8 * id + 0x4)
1932
1933static int cpsw_am33xx_cm_get_macid(struct device *dev, int slave,
1934 u8 *mac_addr)
1935{
1936 u32 macid_lo;
1937 u32 macid_hi;
1938 struct regmap *syscon;
1939
1940 syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
1941 if (IS_ERR(syscon)) {
1942 if (PTR_ERR(syscon) == -ENODEV)
1943 return 0;
1944 return PTR_ERR(syscon);
1945 }
1946
1947 regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(slave), &macid_lo);
1948 regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(slave), &macid_hi);
1949
1950 mac_addr[5] = (macid_lo >> 8) & 0xff;
1951 mac_addr[4] = macid_lo & 0xff;
1952 mac_addr[3] = (macid_hi >> 24) & 0xff;
1953 mac_addr[2] = (macid_hi >> 16) & 0xff;
1954 mac_addr[1] = (macid_hi >> 8) & 0xff;
1955 mac_addr[0] = macid_hi & 0xff;
1956
1957 return 0;
1958}
1959
1960static int cpsw_probe_dt(struct cpsw_platform_data *data, 1937static int cpsw_probe_dt(struct cpsw_platform_data *data,
1961 struct platform_device *pdev) 1938 struct platform_device *pdev)
1962{ 1939{
@@ -2081,7 +2058,8 @@ no_phy_slave:
2081 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); 2058 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
2082 } else { 2059 } else {
2083 if (of_machine_is_compatible("ti,am33xx")) { 2060 if (of_machine_is_compatible("ti,am33xx")) {
2084 ret = cpsw_am33xx_cm_get_macid(&pdev->dev, i, 2061 ret = cpsw_am33xx_cm_get_macid(&pdev->dev,
2062 0x630, i,
2085 slave_data->mac_addr); 2063 slave_data->mac_addr);
2086 if (ret) 2064 if (ret)
2087 return ret; 2065 return ret;
@@ -2192,7 +2170,8 @@ static int cpsw_probe(struct platform_device *pdev)
2192 void __iomem *ss_regs; 2170 void __iomem *ss_regs;
2193 struct resource *res, *ss_res; 2171 struct resource *res, *ss_res;
2194 u32 slave_offset, sliver_offset, slave_size; 2172 u32 slave_offset, sliver_offset, slave_size;
2195 int ret = 0, i, k = 0; 2173 int ret = 0, i;
2174 int irq;
2196 2175
2197 ndev = alloc_etherdev(sizeof(struct cpsw_priv)); 2176 ndev = alloc_etherdev(sizeof(struct cpsw_priv));
2198 if (!ndev) { 2177 if (!ndev) {
@@ -2374,31 +2353,47 @@ static int cpsw_probe(struct platform_device *pdev)
2374 goto clean_dma_ret; 2353 goto clean_dma_ret;
2375 } 2354 }
2376 2355
2377 ndev->irq = platform_get_irq(pdev, 0); 2356 ndev->irq = platform_get_irq(pdev, 1);
2378 if (ndev->irq < 0) { 2357 if (ndev->irq < 0) {
2379 dev_err(priv->dev, "error getting irq resource\n"); 2358 dev_err(priv->dev, "error getting irq resource\n");
2380 ret = -ENOENT; 2359 ret = -ENOENT;
2381 goto clean_ale_ret; 2360 goto clean_ale_ret;
2382 } 2361 }
2383 2362
2384 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { 2363 /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
2385 if (k >= ARRAY_SIZE(priv->irqs_table)) { 2364 * MISC IRQs which are always kept disabled with this driver so
2386 ret = -EINVAL; 2365 * we will not request them.
2387 goto clean_ale_ret; 2366 *
2388 } 2367 * If anyone wants to implement support for those, make sure to
2368 * first request and append them to irqs_table array.
2369 */
2389 2370
2390 ret = devm_request_irq(&pdev->dev, res->start, cpsw_interrupt, 2371 /* RX IRQ */
2391 0, dev_name(&pdev->dev), priv); 2372 irq = platform_get_irq(pdev, 1);
2392 if (ret < 0) { 2373 if (irq < 0)
2393 dev_err(priv->dev, "error attaching irq (%d)\n", ret); 2374 goto clean_ale_ret;
2394 goto clean_ale_ret;
2395 }
2396 2375
2397 priv->irqs_table[k] = res->start; 2376 priv->irqs_table[0] = irq;
2398 k++; 2377 ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
2378 0, dev_name(&pdev->dev), priv);
2379 if (ret < 0) {
2380 dev_err(priv->dev, "error attaching irq (%d)\n", ret);
2381 goto clean_ale_ret;
2399 } 2382 }
2400 2383
2401 priv->num_irqs = k; 2384 /* TX IRQ */
2385 irq = platform_get_irq(pdev, 2);
2386 if (irq < 0)
2387 goto clean_ale_ret;
2388
2389 priv->irqs_table[1] = irq;
2390 ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
2391 0, dev_name(&pdev->dev), priv);
2392 if (ret < 0) {
2393 dev_err(priv->dev, "error attaching irq (%d)\n", ret);
2394 goto clean_ale_ret;
2395 }
2396 priv->num_irqs = 2;
2402 2397
2403 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 2398 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2404 2399
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 1b710674630c..ca90efafd156 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -41,5 +41,7 @@ struct cpsw_platform_data {
41}; 41};
42 42
43void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave); 43void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
44int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
45 u8 *mac_addr);
44 46
45#endif /* __CPSW_H__ */ 47#endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 5246b3a18ff8..6e927b4583aa 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -13,6 +13,7 @@
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/module.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
17#include <linux/seq_file.h> 18#include <linux/seq_file.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
@@ -146,7 +147,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
146 return idx; 147 return idx;
147} 148}
148 149
149int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid) 150static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
150{ 151{
151 u32 ale_entry[ALE_ENTRY_WORDS]; 152 u32 ale_entry[ALE_ENTRY_WORDS];
152 int type, idx; 153 int type, idx;
@@ -167,7 +168,7 @@ int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
167 return -ENOENT; 168 return -ENOENT;
168} 169}
169 170
170int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid) 171static int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
171{ 172{
172 u32 ale_entry[ALE_ENTRY_WORDS]; 173 u32 ale_entry[ALE_ENTRY_WORDS];
173 int type, idx; 174 int type, idx;
@@ -265,6 +266,7 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
265 } 266 }
266 return 0; 267 return 0;
267} 268}
269EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast);
268 270
269static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry, 271static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
270 int port_mask) 272 int port_mask)
@@ -297,6 +299,7 @@ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
297 } 299 }
298 return 0; 300 return 0;
299} 301}
302EXPORT_SYMBOL_GPL(cpsw_ale_flush);
300 303
301static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry, 304static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
302 int flags, u16 vid) 305 int flags, u16 vid)
@@ -334,6 +337,7 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
334 cpsw_ale_write(ale, idx, ale_entry); 337 cpsw_ale_write(ale, idx, ale_entry);
335 return 0; 338 return 0;
336} 339}
340EXPORT_SYMBOL_GPL(cpsw_ale_add_ucast);
337 341
338int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, 342int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
339 int flags, u16 vid) 343 int flags, u16 vid)
@@ -349,6 +353,7 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
349 cpsw_ale_write(ale, idx, ale_entry); 353 cpsw_ale_write(ale, idx, ale_entry);
350 return 0; 354 return 0;
351} 355}
356EXPORT_SYMBOL_GPL(cpsw_ale_del_ucast);
352 357
353int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, 358int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
354 int flags, u16 vid, int mcast_state) 359 int flags, u16 vid, int mcast_state)
@@ -380,6 +385,7 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
380 cpsw_ale_write(ale, idx, ale_entry); 385 cpsw_ale_write(ale, idx, ale_entry);
381 return 0; 386 return 0;
382} 387}
388EXPORT_SYMBOL_GPL(cpsw_ale_add_mcast);
383 389
384int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, 390int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
385 int flags, u16 vid) 391 int flags, u16 vid)
@@ -401,6 +407,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
401 cpsw_ale_write(ale, idx, ale_entry); 407 cpsw_ale_write(ale, idx, ale_entry);
402 return 0; 408 return 0;
403} 409}
410EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast);
404 411
405int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, 412int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
406 int reg_mcast, int unreg_mcast) 413 int reg_mcast, int unreg_mcast)
@@ -430,6 +437,7 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
430 cpsw_ale_write(ale, idx, ale_entry); 437 cpsw_ale_write(ale, idx, ale_entry);
431 return 0; 438 return 0;
432} 439}
440EXPORT_SYMBOL_GPL(cpsw_ale_add_vlan);
433 441
434int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) 442int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
435{ 443{
@@ -450,6 +458,7 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
450 cpsw_ale_write(ale, idx, ale_entry); 458 cpsw_ale_write(ale, idx, ale_entry);
451 return 0; 459 return 0;
452} 460}
461EXPORT_SYMBOL_GPL(cpsw_ale_del_vlan);
453 462
454void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti) 463void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
455{ 464{
@@ -479,6 +488,7 @@ void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
479 cpsw_ale_write(ale, idx, ale_entry); 488 cpsw_ale_write(ale, idx, ale_entry);
480 } 489 }
481} 490}
491EXPORT_SYMBOL_GPL(cpsw_ale_set_allmulti);
482 492
483struct ale_control_info { 493struct ale_control_info {
484 const char *name; 494 const char *name;
@@ -704,6 +714,7 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
704 714
705 return 0; 715 return 0;
706} 716}
717EXPORT_SYMBOL_GPL(cpsw_ale_control_set);
707 718
708int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control) 719int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
709{ 720{
@@ -727,6 +738,7 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
727 tmp = __raw_readl(ale->params.ale_regs + offset) >> shift; 738 tmp = __raw_readl(ale->params.ale_regs + offset) >> shift;
728 return tmp & BITMASK(info->bits); 739 return tmp & BITMASK(info->bits);
729} 740}
741EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
730 742
731static void cpsw_ale_timer(unsigned long arg) 743static void cpsw_ale_timer(unsigned long arg)
732{ 744{
@@ -750,6 +762,7 @@ int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout)
750 } 762 }
751 return 0; 763 return 0;
752} 764}
765EXPORT_SYMBOL_GPL(cpsw_ale_set_ageout);
753 766
754void cpsw_ale_start(struct cpsw_ale *ale) 767void cpsw_ale_start(struct cpsw_ale *ale)
755{ 768{
@@ -769,11 +782,13 @@ void cpsw_ale_start(struct cpsw_ale *ale)
769 add_timer(&ale->timer); 782 add_timer(&ale->timer);
770 } 783 }
771} 784}
785EXPORT_SYMBOL_GPL(cpsw_ale_start);
772 786
773void cpsw_ale_stop(struct cpsw_ale *ale) 787void cpsw_ale_stop(struct cpsw_ale *ale)
774{ 788{
775 del_timer_sync(&ale->timer); 789 del_timer_sync(&ale->timer);
776} 790}
791EXPORT_SYMBOL_GPL(cpsw_ale_stop);
777 792
778struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) 793struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
779{ 794{
@@ -788,6 +803,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
788 803
789 return ale; 804 return ale;
790} 805}
806EXPORT_SYMBOL_GPL(cpsw_ale_create);
791 807
792int cpsw_ale_destroy(struct cpsw_ale *ale) 808int cpsw_ale_destroy(struct cpsw_ale *ale)
793{ 809{
@@ -797,6 +813,7 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
797 kfree(ale); 813 kfree(ale);
798 return 0; 814 return 0;
799} 815}
816EXPORT_SYMBOL_GPL(cpsw_ale_destroy);
800 817
801void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data) 818void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
802{ 819{
@@ -807,3 +824,8 @@ void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
807 data += ALE_ENTRY_WORDS; 824 data += ALE_ENTRY_WORDS;
808 } 825 }
809} 826}
827EXPORT_SYMBOL_GPL(cpsw_ale_dump);
828
829MODULE_LICENSE("GPL v2");
830MODULE_DESCRIPTION("TI CPSW ALE driver");
831MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 4a4388b813ac..fbe42cb107ec 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -157,14 +157,11 @@ static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
157 157
158static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 158static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
159{ 159{
160 s64 now;
161 unsigned long flags; 160 unsigned long flags;
162 struct cpts *cpts = container_of(ptp, struct cpts, info); 161 struct cpts *cpts = container_of(ptp, struct cpts, info);
163 162
164 spin_lock_irqsave(&cpts->lock, flags); 163 spin_lock_irqsave(&cpts->lock, flags);
165 now = timecounter_read(&cpts->tc); 164 timecounter_adjtime(&cpts->tc, delta);
166 now += delta;
167 timecounter_init(&cpts->tc, &cpts->cc, now);
168 spin_unlock_irqrestore(&cpts->lock, flags); 165 spin_unlock_irqrestore(&cpts->lock, flags);
169 166
170 return 0; 167 return 0;
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 1a581ef7eee8..69a46b92c7d6 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -27,6 +27,7 @@
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/ptp_clock_kernel.h> 28#include <linux/ptp_clock_kernel.h>
29#include <linux/skbuff.h> 29#include <linux/skbuff.h>
30#include <linux/timecounter.h>
30 31
31struct cpsw_cpts { 32struct cpsw_cpts {
32 u32 idver; /* Identification and version */ 33 u32 idver; /* Identification and version */
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 5fae4354722c..aeebc0a7bf47 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -52,6 +52,7 @@
52#include <linux/dma-mapping.h> 52#include <linux/dma-mapping.h>
53#include <linux/clk.h> 53#include <linux/clk.h>
54#include <linux/platform_device.h> 54#include <linux/platform_device.h>
55#include <linux/regmap.h>
55#include <linux/semaphore.h> 56#include <linux/semaphore.h>
56#include <linux/phy.h> 57#include <linux/phy.h>
57#include <linux/bitops.h> 58#include <linux/bitops.h>
@@ -65,10 +66,12 @@
65#include <linux/of_mdio.h> 66#include <linux/of_mdio.h>
66#include <linux/of_irq.h> 67#include <linux/of_irq.h>
67#include <linux/of_net.h> 68#include <linux/of_net.h>
69#include <linux/mfd/syscon.h>
68 70
69#include <asm/irq.h> 71#include <asm/irq.h>
70#include <asm/page.h> 72#include <asm/page.h>
71 73
74#include "cpsw.h"
72#include "davinci_cpdma.h" 75#include "davinci_cpdma.h"
73 76
74static int debug_level; 77static int debug_level;
@@ -1838,7 +1841,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1838 if (!is_valid_ether_addr(pdata->mac_addr)) { 1841 if (!is_valid_ether_addr(pdata->mac_addr)) {
1839 mac_addr = of_get_mac_address(np); 1842 mac_addr = of_get_mac_address(np);
1840 if (mac_addr) 1843 if (mac_addr)
1841 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN); 1844 ether_addr_copy(pdata->mac_addr, mac_addr);
1842 } 1845 }
1843 1846
1844 of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", 1847 of_property_read_u32(np, "ti,davinci-ctrl-reg-offset",
@@ -1879,6 +1882,53 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1879 return pdata; 1882 return pdata;
1880} 1883}
1881 1884
1885static int davinci_emac_3517_get_macid(struct device *dev, u16 offset,
1886 int slave, u8 *mac_addr)
1887{
1888 u32 macid_lsb;
1889 u32 macid_msb;
1890 struct regmap *syscon;
1891
1892 syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
1893 if (IS_ERR(syscon)) {
1894 if (PTR_ERR(syscon) == -ENODEV)
1895 return 0;
1896 return PTR_ERR(syscon);
1897 }
1898
1899 regmap_read(syscon, offset, &macid_lsb);
1900 regmap_read(syscon, offset + 4, &macid_msb);
1901
1902 mac_addr[0] = (macid_msb >> 16) & 0xff;
1903 mac_addr[1] = (macid_msb >> 8) & 0xff;
1904 mac_addr[2] = macid_msb & 0xff;
1905 mac_addr[3] = (macid_lsb >> 16) & 0xff;
1906 mac_addr[4] = (macid_lsb >> 8) & 0xff;
1907 mac_addr[5] = macid_lsb & 0xff;
1908
1909 return 0;
1910}
1911
1912static int davinci_emac_try_get_mac(struct platform_device *pdev,
1913 int instance, u8 *mac_addr)
1914{
1915 int error = -EINVAL;
1916
1917 if (!pdev->dev.of_node)
1918 return error;
1919
1920 if (of_device_is_compatible(pdev->dev.of_node, "ti,am3517-emac"))
1921 error = davinci_emac_3517_get_macid(&pdev->dev, 0x110,
1922 0, mac_addr);
1923 else if (of_device_is_compatible(pdev->dev.of_node,
1924 "ti,dm816-emac"))
1925 error = cpsw_am33xx_cm_get_macid(&pdev->dev, 0x30,
1926 instance,
1927 mac_addr);
1928
1929 return error;
1930}
1931
1882/** 1932/**
1883 * davinci_emac_probe - EMAC device probe 1933 * davinci_emac_probe - EMAC device probe
1884 * @pdev: The DaVinci EMAC device that we are removing 1934 * @pdev: The DaVinci EMAC device that we are removing
@@ -2009,6 +2059,10 @@ static int davinci_emac_probe(struct platform_device *pdev)
2009 } 2059 }
2010 ndev->irq = res->start; 2060 ndev->irq = res->start;
2011 2061
2062 rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
2063 if (!rc)
2064 ether_addr_copy(ndev->dev_addr, priv->mac_addr);
2065
2012 if (!is_valid_ether_addr(priv->mac_addr)) { 2066 if (!is_valid_ether_addr(priv->mac_addr)) {
2013 /* Use random MAC if none passed */ 2067 /* Use random MAC if none passed */
2014 eth_hw_addr_random(ndev); 2068 eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
new file mode 100644
index 000000000000..906e9bc412f5
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -0,0 +1,229 @@
1/*
2 * NetCP driver local header
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated
5 * Authors: Sandeep Nair <sandeep_n@ti.com>
6 * Sandeep Paulraj <s-paulraj@ti.com>
7 * Cyril Chemparathy <cyril@ti.com>
8 * Santosh Shilimkar <santosh.shilimkar@ti.com>
9 * Wingman Kwok <w-kwok2@ti.com>
10 * Murali Karicheri <m-karicheri2@ti.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation version 2.
15 *
16 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
17 * kind, whether express or implied; without even the implied warranty
18 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
21#ifndef __NETCP_H__
22#define __NETCP_H__
23
24#include <linux/netdevice.h>
25#include <linux/soc/ti/knav_dma.h>
26
27/* Maximum Ethernet frame size supported by Keystone switch */
28#define NETCP_MAX_FRAME_SIZE 9504
29
30#define SGMII_LINK_MAC_MAC_AUTONEG 0
31#define SGMII_LINK_MAC_PHY 1
32#define SGMII_LINK_MAC_MAC_FORCED 2
33#define SGMII_LINK_MAC_FIBER 3
34#define SGMII_LINK_MAC_PHY_NO_MDIO 4
35#define XGMII_LINK_MAC_PHY 10
36#define XGMII_LINK_MAC_MAC_FORCED 11
37
38struct netcp_device;
39
40struct netcp_tx_pipe {
41 struct netcp_device *netcp_device;
42 void *dma_queue;
43 unsigned int dma_queue_id;
44 u8 dma_psflags;
45 void *dma_channel;
46 const char *dma_chan_name;
47};
48
49#define ADDR_NEW BIT(0)
50#define ADDR_VALID BIT(1)
51
52enum netcp_addr_type {
53 ADDR_ANY,
54 ADDR_DEV,
55 ADDR_UCAST,
56 ADDR_MCAST,
57 ADDR_BCAST
58};
59
60struct netcp_addr {
61 struct netcp_intf *netcp;
62 unsigned char addr[ETH_ALEN];
63 enum netcp_addr_type type;
64 unsigned int flags;
65 struct list_head node;
66};
67
68struct netcp_intf {
69 struct device *dev;
70 struct device *ndev_dev;
71 struct net_device *ndev;
72 bool big_endian;
73 unsigned int tx_compl_qid;
74 void *tx_pool;
75 struct list_head txhook_list_head;
76 unsigned int tx_pause_threshold;
77 void *tx_compl_q;
78
79 unsigned int tx_resume_threshold;
80 void *rx_queue;
81 void *rx_pool;
82 struct list_head rxhook_list_head;
83 unsigned int rx_queue_id;
84 void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
85 u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
86 struct napi_struct rx_napi;
87 struct napi_struct tx_napi;
88
89 void *rx_channel;
90 const char *dma_chan_name;
91 u32 rx_pool_size;
92 u32 rx_pool_region_id;
93 u32 tx_pool_size;
94 u32 tx_pool_region_id;
95 struct list_head module_head;
96 struct list_head interface_list;
97 struct list_head addr_list;
98 bool netdev_registered;
99 bool primary_module_attached;
100
101 /* Lock used for protecting Rx/Tx hook list management */
102 spinlock_t lock;
103 struct netcp_device *netcp_device;
104 struct device_node *node_interface;
105
106 /* DMA configuration data */
107 u32 msg_enable;
108 u32 rx_queue_depths[KNAV_DMA_FDQ_PER_CHAN];
109};
110
111#define NETCP_PSDATA_LEN KNAV_DMA_NUM_PS_WORDS
112struct netcp_packet {
113 struct sk_buff *skb;
114 u32 *epib;
115 u32 *psdata;
116 unsigned int psdata_len;
117 struct netcp_intf *netcp;
118 struct netcp_tx_pipe *tx_pipe;
119 bool rxtstamp_complete;
120 void *ts_context;
121
122 int (*txtstamp_complete)(void *ctx, struct netcp_packet *pkt);
123};
124
125static inline u32 *netcp_push_psdata(struct netcp_packet *p_info,
126 unsigned int bytes)
127{
128 u32 *buf;
129 unsigned int words;
130
131 if ((bytes & 0x03) != 0)
132 return NULL;
133 words = bytes >> 2;
134
135 if ((p_info->psdata_len + words) > NETCP_PSDATA_LEN)
136 return NULL;
137
138 p_info->psdata_len += words;
139 buf = &p_info->psdata[NETCP_PSDATA_LEN - p_info->psdata_len];
140 return buf;
141}
142
143static inline int netcp_align_psdata(struct netcp_packet *p_info,
144 unsigned int byte_align)
145{
146 int padding;
147
148 switch (byte_align) {
149 case 0:
150 padding = -EINVAL;
151 break;
152 case 1:
153 case 2:
154 case 4:
155 padding = 0;
156 break;
157 case 8:
158 padding = (p_info->psdata_len << 2) % 8;
159 break;
160 case 16:
161 padding = (p_info->psdata_len << 2) % 16;
162 break;
163 default:
164 padding = (p_info->psdata_len << 2) % byte_align;
165 break;
166 }
167 return padding;
168}
169
170struct netcp_module {
171 const char *name;
172 struct module *owner;
173 bool primary;
174
175 /* probe/remove: called once per NETCP instance */
176 int (*probe)(struct netcp_device *netcp_device,
177 struct device *device, struct device_node *node,
178 void **inst_priv);
179 int (*remove)(struct netcp_device *netcp_device, void *inst_priv);
180
181 /* attach/release: called once per network interface */
182 int (*attach)(void *inst_priv, struct net_device *ndev,
183 struct device_node *node, void **intf_priv);
184 int (*release)(void *intf_priv);
185 int (*open)(void *intf_priv, struct net_device *ndev);
186 int (*close)(void *intf_priv, struct net_device *ndev);
187 int (*add_addr)(void *intf_priv, struct netcp_addr *naddr);
188 int (*del_addr)(void *intf_priv, struct netcp_addr *naddr);
189 int (*add_vid)(void *intf_priv, int vid);
190 int (*del_vid)(void *intf_priv, int vid);
191 int (*ioctl)(void *intf_priv, struct ifreq *req, int cmd);
192
193 /* used internally */
194 struct list_head module_list;
195 struct list_head interface_list;
196};
197
198int netcp_register_module(struct netcp_module *module);
199void netcp_unregister_module(struct netcp_module *module);
200void *netcp_module_get_intf_data(struct netcp_module *module,
201 struct netcp_intf *intf);
202
203int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
204 struct netcp_device *netcp_device,
205 const char *dma_chan_name, unsigned int dma_queue_id);
206int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe);
207int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe);
208
209typedef int netcp_hook_rtn(int order, void *data, struct netcp_packet *packet);
210int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
211 netcp_hook_rtn *hook_rtn, void *hook_data);
212int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
213 netcp_hook_rtn *hook_rtn, void *hook_data);
214int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
215 netcp_hook_rtn *hook_rtn, void *hook_data);
216int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
217 netcp_hook_rtn *hook_rtn, void *hook_data);
218void *netcp_device_find_module(struct netcp_device *netcp_device,
219 const char *name);
220
221/* SGMII functions */
222int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
223int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
224int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
225
226/* XGBE SERDES init functions */
227int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs);
228
229#endif /* __NETCP_H__ */
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
new file mode 100644
index 000000000000..a31a8c3c8e7c
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -0,0 +1,2149 @@
1/*
2 * Keystone NetCP Core driver
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated
5 * Authors: Sandeep Nair <sandeep_n@ti.com>
6 * Sandeep Paulraj <s-paulraj@ti.com>
7 * Cyril Chemparathy <cyril@ti.com>
8 * Santosh Shilimkar <santosh.shilimkar@ti.com>
9 * Murali Karicheri <m-karicheri2@ti.com>
10 * Wingman Kwok <w-kwok2@ti.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation version 2.
15 *
16 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
17 * kind, whether express or implied; without even the implied warranty
18 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 */
21
22#include <linux/io.h>
23#include <linux/module.h>
24#include <linux/of_net.h>
25#include <linux/of_address.h>
26#include <linux/if_vlan.h>
27#include <linux/pm_runtime.h>
28#include <linux/platform_device.h>
29#include <linux/soc/ti/knav_qmss.h>
30#include <linux/soc/ti/knav_dma.h>
31
32#include "netcp.h"
33
34#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
35#define NETCP_NAPI_WEIGHT 64
36#define NETCP_TX_TIMEOUT (5 * HZ)
37#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
38#define NETCP_MAX_MCAST_ADDR 16
39
40#define NETCP_EFUSE_REG_INDEX 0
41
42#define NETCP_MOD_PROBE_SKIPPED 1
43#define NETCP_MOD_PROBE_FAILED 2
44
45#define NETCP_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
46 NETIF_MSG_DRV | NETIF_MSG_LINK | \
47 NETIF_MSG_IFUP | NETIF_MSG_INTR | \
48 NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
49 NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
50 NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
51 NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
52 NETIF_MSG_RX_STATUS)
53
54#define knav_queue_get_id(q) knav_queue_device_control(q, \
55 KNAV_QUEUE_GET_ID, (unsigned long)NULL)
56
57#define knav_queue_enable_notify(q) knav_queue_device_control(q, \
58 KNAV_QUEUE_ENABLE_NOTIFY, \
59 (unsigned long)NULL)
60
61#define knav_queue_disable_notify(q) knav_queue_device_control(q, \
62 KNAV_QUEUE_DISABLE_NOTIFY, \
63 (unsigned long)NULL)
64
65#define knav_queue_get_count(q) knav_queue_device_control(q, \
66 KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)
67
68#define for_each_netcp_module(module) \
69 list_for_each_entry(module, &netcp_modules, module_list)
70
71#define for_each_netcp_device_module(netcp_device, inst_modpriv) \
72 list_for_each_entry(inst_modpriv, \
73 &((netcp_device)->modpriv_head), inst_list)
74
75#define for_each_module(netcp, intf_modpriv) \
76 list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)
77
78/* Module management structures */
79struct netcp_device {
80 struct list_head device_list;
81 struct list_head interface_head;
82 struct list_head modpriv_head;
83 struct device *device;
84};
85
86struct netcp_inst_modpriv {
87 struct netcp_device *netcp_device;
88 struct netcp_module *netcp_module;
89 struct list_head inst_list;
90 void *module_priv;
91};
92
93struct netcp_intf_modpriv {
94 struct netcp_intf *netcp_priv;
95 struct netcp_module *netcp_module;
96 struct list_head intf_list;
97 void *module_priv;
98};
99
100static LIST_HEAD(netcp_devices);
101static LIST_HEAD(netcp_modules);
102static DEFINE_MUTEX(netcp_modules_lock);
103
104static int netcp_debug_level = -1;
105module_param(netcp_debug_level, int, 0);
106MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
107
108/* Helper functions - Get/Set */
109static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc,
110 struct knav_dma_desc *desc)
111{
112 *buff_len = desc->buff_len;
113 *buff = desc->buff;
114 *ndesc = desc->next_desc;
115}
116
117static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc)
118{
119 *pad0 = desc->pad[0];
120 *pad1 = desc->pad[1];
121}
122
123static void get_org_pkt_info(u32 *buff, u32 *buff_len,
124 struct knav_dma_desc *desc)
125{
126 *buff = desc->orig_buff;
127 *buff_len = desc->orig_len;
128}
129
130static void get_words(u32 *words, int num_words, u32 *desc)
131{
132 int i;
133
134 for (i = 0; i < num_words; i++)
135 words[i] = desc[i];
136}
137
138static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc,
139 struct knav_dma_desc *desc)
140{
141 desc->buff_len = buff_len;
142 desc->buff = buff;
143 desc->next_desc = ndesc;
144}
145
146static void set_desc_info(u32 desc_info, u32 pkt_info,
147 struct knav_dma_desc *desc)
148{
149 desc->desc_info = desc_info;
150 desc->packet_info = pkt_info;
151}
152
153static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc)
154{
155 desc->pad[0] = pad0;
156 desc->pad[1] = pad1;
157}
158
159static void set_org_pkt_info(u32 buff, u32 buff_len,
160 struct knav_dma_desc *desc)
161{
162 desc->orig_buff = buff;
163 desc->orig_len = buff_len;
164}
165
166static void set_words(u32 *words, int num_words, u32 *desc)
167{
168 int i;
169
170 for (i = 0; i < num_words; i++)
171 desc[i] = words[i];
172}
173
174/* Read the e-fuse value as 32 bit values to be endian independent */
175static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac)
176{
177 unsigned int addr0, addr1;
178
179 addr1 = readl(efuse_mac + 4);
180 addr0 = readl(efuse_mac);
181
182 x[0] = (addr1 & 0x0000ff00) >> 8;
183 x[1] = addr1 & 0x000000ff;
184 x[2] = (addr0 & 0xff000000) >> 24;
185 x[3] = (addr0 & 0x00ff0000) >> 16;
186 x[4] = (addr0 & 0x0000ff00) >> 8;
187 x[5] = addr0 & 0x000000ff;
188
189 return 0;
190}
191
192static const char *netcp_node_name(struct device_node *node)
193{
194 const char *name;
195
196 if (of_property_read_string(node, "label", &name) < 0)
197 name = node->name;
198 if (!name)
199 name = "unknown";
200 return name;
201}
202
203/* Module management routines */
204static int netcp_register_interface(struct netcp_intf *netcp)
205{
206 int ret;
207
208 ret = register_netdev(netcp->ndev);
209 if (!ret)
210 netcp->netdev_registered = true;
211 return ret;
212}
213
214static int netcp_module_probe(struct netcp_device *netcp_device,
215 struct netcp_module *module)
216{
217 struct device *dev = netcp_device->device;
218 struct device_node *devices, *interface, *node = dev->of_node;
219 struct device_node *child;
220 struct netcp_inst_modpriv *inst_modpriv;
221 struct netcp_intf *netcp_intf;
222 struct netcp_module *tmp;
223 bool primary_module_registered = false;
224 int ret;
225
226 /* Find this module in the sub-tree for this device */
227 devices = of_get_child_by_name(node, "netcp-devices");
228 if (!devices) {
229 dev_err(dev, "could not find netcp-devices node\n");
230 return NETCP_MOD_PROBE_SKIPPED;
231 }
232
233 for_each_available_child_of_node(devices, child) {
234 const char *name = netcp_node_name(child);
235
236 if (!strcasecmp(module->name, name))
237 break;
238 }
239
240 of_node_put(devices);
241 /* If module not used for this device, skip it */
242 if (!child) {
243 dev_warn(dev, "module(%s) not used for device\n", module->name);
244 return NETCP_MOD_PROBE_SKIPPED;
245 }
246
247 inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
248 if (!inst_modpriv) {
249 of_node_put(child);
250 return -ENOMEM;
251 }
252
253 inst_modpriv->netcp_device = netcp_device;
254 inst_modpriv->netcp_module = module;
255 list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);
256
257 ret = module->probe(netcp_device, dev, child,
258 &inst_modpriv->module_priv);
259 of_node_put(child);
260 if (ret) {
261 dev_err(dev, "Probe of module(%s) failed with %d\n",
262 module->name, ret);
263 list_del(&inst_modpriv->inst_list);
264 devm_kfree(dev, inst_modpriv);
265 return NETCP_MOD_PROBE_FAILED;
266 }
267
268 /* Attach modules only if the primary module is probed */
269 for_each_netcp_module(tmp) {
270 if (tmp->primary)
271 primary_module_registered = true;
272 }
273
274 if (!primary_module_registered)
275 return 0;
276
277 /* Attach module to interfaces */
278 list_for_each_entry(netcp_intf, &netcp_device->interface_head,
279 interface_list) {
280 struct netcp_intf_modpriv *intf_modpriv;
281
282 /* If interface not registered then register now */
283 if (!netcp_intf->netdev_registered)
284 ret = netcp_register_interface(netcp_intf);
285
286 if (ret)
287 return -ENODEV;
288
289 intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
290 GFP_KERNEL);
291 if (!intf_modpriv)
292 return -ENOMEM;
293
294 interface = of_parse_phandle(netcp_intf->node_interface,
295 module->name, 0);
296
297 intf_modpriv->netcp_priv = netcp_intf;
298 intf_modpriv->netcp_module = module;
299 list_add_tail(&intf_modpriv->intf_list,
300 &netcp_intf->module_head);
301
302 ret = module->attach(inst_modpriv->module_priv,
303 netcp_intf->ndev, interface,
304 &intf_modpriv->module_priv);
305 of_node_put(interface);
306 if (ret) {
307 dev_dbg(dev, "Attach of module %s declined with %d\n",
308 module->name, ret);
309 list_del(&intf_modpriv->intf_list);
310 devm_kfree(dev, intf_modpriv);
311 continue;
312 }
313 }
314 return 0;
315}
316
/* Register a NetCP module with the core and probe it against every
 * NetCP device already known.  The module must supply a name and a
 * probe callback.
 *
 * Returns 0 on success, -EINVAL for a malformed module, -EEXIST if a
 * module with the same (case-insensitive) name is already registered,
 * or a negative probe error after rolling the registration back via
 * netcp_unregister_module().
 */
int netcp_register_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *tmp;
	int ret;

	if (!module->name) {
		WARN(1, "error registering netcp module: no name\n");
		return -EINVAL;
	}

	if (!module->probe) {
		WARN(1, "error registering netcp module: no probe\n");
		return -EINVAL;
	}

	mutex_lock(&netcp_modules_lock);

	/* Reject duplicate module names (case-insensitive compare) */
	for_each_netcp_module(tmp) {
		if (!strcasecmp(tmp->name, module->name)) {
			mutex_unlock(&netcp_modules_lock);
			return -EEXIST;
		}
	}
	list_add_tail(&module->module_list, &netcp_modules);

	/* Probe the new module against every already-present device */
	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			goto fail;
	}

	mutex_unlock(&netcp_modules_lock);
	return 0;

fail:
	/* Drop the lock first: netcp_unregister_module() re-acquires it */
	mutex_unlock(&netcp_modules_lock);
	netcp_unregister_module(module);
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_register_module);
358
/* Undo netcp_module_probe() for one module on one device: detach the
 * module from every interface of the device (module->release), then
 * remove its per-device instance (module->remove) and free the
 * associated bookkeeping allocations.
 *
 * NOTE(review): release/remove are invoked unconditionally for any
 * matching entry -- assumes every probed module provides both
 * callbacks; confirm against the module registration contract.
 */
static void netcp_release_module(struct netcp_device *netcp_device,
				 struct netcp_module *module)
{
	struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device *dev = netcp_device->device;

	/* Release the module from each interface */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

		list_for_each_entry_safe(intf_modpriv, intf_tmp,
					 &netcp_intf->module_head,
					 intf_list) {
			if (intf_modpriv->netcp_module == module) {
				module->release(intf_modpriv->module_priv);
				list_del(&intf_modpriv->intf_list);
				devm_kfree(dev, intf_modpriv);
				/* at most one attachment per interface */
				break;
			}
		}
	}

	/* Remove the module from each instance */
	list_for_each_entry_safe(inst_modpriv, inst_tmp,
				 &netcp_device->modpriv_head, inst_list) {
		if (inst_modpriv->netcp_module == module) {
			module->remove(netcp_device,
				       inst_modpriv->module_priv);
			list_del(&inst_modpriv->inst_list);
			devm_kfree(dev, inst_modpriv);
			break;
		}
	}
}
396
397void netcp_unregister_module(struct netcp_module *module)
398{
399 struct netcp_device *netcp_device;
400 struct netcp_module *module_tmp;
401
402 mutex_lock(&netcp_modules_lock);
403
404 list_for_each_entry(netcp_device, &netcp_devices, device_list) {
405 netcp_release_module(netcp_device, module);
406 }
407
408 /* Remove the module from the module list */
409 for_each_netcp_module(module_tmp) {
410 if (module == module_tmp) {
411 list_del(&module->module_list);
412 break;
413 }
414 }
415
416 mutex_unlock(&netcp_modules_lock);
417}
418EXPORT_SYMBOL_GPL(netcp_unregister_module);
419
420void *netcp_module_get_intf_data(struct netcp_module *module,
421 struct netcp_intf *intf)
422{
423 struct netcp_intf_modpriv *intf_modpriv;
424
425 list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
426 if (intf_modpriv->netcp_module == module)
427 return intf_modpriv->module_priv;
428 return NULL;
429}
430EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);
431
/* Module TX and RX Hook management.  Hooks are per-interface packet
 * callbacks kept on a list sorted by ascending order value.
 */
struct netcp_hook_list {
	struct list_head list;		/* linkage on per-interface hook list */
	netcp_hook_rtn *hook_rtn;	/* callback invoked for each packet */
	void *hook_data;		/* opaque context passed to hook_rtn */
	int order;			/* sort key; lower order runs earlier */
};
439
440int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
441 netcp_hook_rtn *hook_rtn, void *hook_data)
442{
443 struct netcp_hook_list *entry;
444 struct netcp_hook_list *next;
445 unsigned long flags;
446
447 entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
448 if (!entry)
449 return -ENOMEM;
450
451 entry->hook_rtn = hook_rtn;
452 entry->hook_data = hook_data;
453 entry->order = order;
454
455 spin_lock_irqsave(&netcp_priv->lock, flags);
456 list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
457 if (next->order > order)
458 break;
459 }
460 __list_add(&entry->list, next->list.prev, &next->list);
461 spin_unlock_irqrestore(&netcp_priv->lock, flags);
462
463 return 0;
464}
465EXPORT_SYMBOL_GPL(netcp_register_txhook);
466
467int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
468 netcp_hook_rtn *hook_rtn, void *hook_data)
469{
470 struct netcp_hook_list *next, *n;
471 unsigned long flags;
472
473 spin_lock_irqsave(&netcp_priv->lock, flags);
474 list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
475 if ((next->order == order) &&
476 (next->hook_rtn == hook_rtn) &&
477 (next->hook_data == hook_data)) {
478 list_del(&next->list);
479 spin_unlock_irqrestore(&netcp_priv->lock, flags);
480 devm_kfree(netcp_priv->dev, next);
481 return 0;
482 }
483 }
484 spin_unlock_irqrestore(&netcp_priv->lock, flags);
485 return -ENOENT;
486}
487EXPORT_SYMBOL_GPL(netcp_unregister_txhook);
488
489int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
490 netcp_hook_rtn *hook_rtn, void *hook_data)
491{
492 struct netcp_hook_list *entry;
493 struct netcp_hook_list *next;
494 unsigned long flags;
495
496 entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
497 if (!entry)
498 return -ENOMEM;
499
500 entry->hook_rtn = hook_rtn;
501 entry->hook_data = hook_data;
502 entry->order = order;
503
504 spin_lock_irqsave(&netcp_priv->lock, flags);
505 list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
506 if (next->order > order)
507 break;
508 }
509 __list_add(&entry->list, next->list.prev, &next->list);
510 spin_unlock_irqrestore(&netcp_priv->lock, flags);
511
512 return 0;
513}
514
515int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
516 netcp_hook_rtn *hook_rtn, void *hook_data)
517{
518 struct netcp_hook_list *next, *n;
519 unsigned long flags;
520
521 spin_lock_irqsave(&netcp_priv->lock, flags);
522 list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
523 if ((next->order == order) &&
524 (next->hook_rtn == hook_rtn) &&
525 (next->hook_data == hook_data)) {
526 list_del(&next->list);
527 spin_unlock_irqrestore(&netcp_priv->lock, flags);
528 devm_kfree(netcp_priv->dev, next);
529 return 0;
530 }
531 }
532 spin_unlock_irqrestore(&netcp_priv->lock, flags);
533
534 return -ENOENT;
535}
536
537static void netcp_frag_free(bool is_frag, void *ptr)
538{
539 if (is_frag)
540 put_page(virt_to_head_page(ptr));
541 else
542 kfree(ptr);
543}
544
545static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
546 struct knav_dma_desc *desc)
547{
548 struct knav_dma_desc *ndesc;
549 dma_addr_t dma_desc, dma_buf;
550 unsigned int buf_len, dma_sz = sizeof(*ndesc);
551 void *buf_ptr;
552 u32 tmp;
553
554 get_words(&dma_desc, 1, &desc->next_desc);
555
556 while (dma_desc) {
557 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
558 if (unlikely(!ndesc)) {
559 dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
560 break;
561 }
562 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
563 get_pad_info((u32 *)&buf_ptr, &tmp, ndesc);
564 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
565 __free_page(buf_ptr);
566 knav_pool_desc_put(netcp->rx_pool, desc);
567 }
568
569 get_pad_info((u32 *)&buf_ptr, &buf_len, desc);
570 if (buf_ptr)
571 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
572 knav_pool_desc_put(netcp->rx_pool, desc);
573}
574
575static void netcp_empty_rx_queue(struct netcp_intf *netcp)
576{
577 struct knav_dma_desc *desc;
578 unsigned int dma_sz;
579 dma_addr_t dma;
580
581 for (; ;) {
582 dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
583 if (!dma)
584 break;
585
586 desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
587 if (unlikely(!desc)) {
588 dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
589 __func__);
590 netcp->ndev->stats.rx_errors++;
591 continue;
592 }
593 netcp_free_rx_desc_chain(netcp, desc);
594 netcp->ndev->stats.rx_dropped++;
595 }
596}
597
/* Pop one completed Rx descriptor chain, build an sk_buff around its
 * buffers, run the registered Rx hooks and hand the packet to the
 * network stack.
 *
 * Returns -1 when the completion queue is empty; 0 otherwise,
 * including error cases (which are accounted in rx_errors).
 */
static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
	unsigned int dma_sz, buf_len, org_buf_len;
	struct knav_dma_desc *desc, *ndesc;
	unsigned int pkt_sz = 0, accum_sz;
	struct netcp_hook_list *rx_hook;
	dma_addr_t dma_desc, dma_buff;
	struct netcp_packet p_info;
	struct sk_buff *skb;
	void *org_buf_ptr;
	u32 tmp;

	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
	if (!dma_desc)
		return -1;

	desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
	if (unlikely(!desc)) {
		dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
		return 0;
	}

	/* dma_desc is reused below as the link to the next chained desc */
	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
	/* NOTE(review): (u32 *) casts assume 32-bit pointers in pad words */
	get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc);

	if (unlikely(!org_buf_ptr)) {
		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
		goto free_desc;
	}

	/* NOTE(review): pkt_sz is still the 0 it was initialized with --
	 * nothing reads the packet length out of the descriptor before
	 * this mask, so the size-mismatch dev_dbg below fires for every
	 * non-empty packet.  Confirm whether a read of the descriptor's
	 * desc_info word is missing here.
	 */
	pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
	accum_sz = buf_len;
	dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);

	/* Build a new sk_buff for the primary buffer */
	skb = build_skb(org_buf_ptr, org_buf_len);
	if (unlikely(!skb)) {
		dev_err(netcp->ndev_dev, "build_skb() failed\n");
		goto free_desc;
	}

	/* update data, tail and len */
	skb_reserve(skb, NETCP_SOP_OFFSET);
	__skb_put(skb, buf_len);

	/* Fill in the page fragment list */
	while (dma_desc) {
		struct page *page;

		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			goto free_desc;
		}

		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
		get_pad_info((u32 *)&page, &tmp, ndesc);

		if (likely(dma_buff && buf_len && page)) {
			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		} else {
			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n",
				(void *)dma_buff, buf_len, page);
			goto free_desc;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset_in_page(dma_buff), buf_len, PAGE_SIZE);
		accum_sz += buf_len;

		/* Free the descriptor */
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}

	/* Free the primary descriptor */
	knav_pool_desc_put(netcp->rx_pool, desc);

	/* check for packet len and warn */
	if (unlikely(pkt_sz != accum_sz))
		dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
			pkt_sz, accum_sz);

	/* Remove ethernet FCS from the packet */
	__pskb_trim(skb, skb->len - ETH_FCS_LEN);

	/* Call each of the RX hooks; any non-zero return vetoes the packet */
	p_info.skb = skb;
	p_info.rxtstamp_complete = false;
	list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
		int ret;

		ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
					&p_info);
		if (unlikely(ret)) {
			dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
				rx_hook->order, ret);
			netcp->ndev->stats.rx_errors++;
			dev_kfree_skb(skb);
			return 0;
		}
	}

	netcp->ndev->last_rx = jiffies;
	netcp->ndev->stats.rx_packets++;
	netcp->ndev->stats.rx_bytes += skb->len;

	/* push skb up the stack */
	skb->protocol = eth_type_trans(skb, netcp->ndev);
	netif_receive_skb(skb);
	return 0;

free_desc:
	/* NOTE(review): if the failure happens after build_skb(), the skb
	 * (and the fragments already attached to it) is not freed on this
	 * path -- possible leak, confirm.
	 */
	netcp_free_rx_desc_chain(netcp, desc);
	netcp->ndev->stats.rx_errors++;
	return 0;
}
715
/* Process up to @budget Rx packets; stop early when the completion
 * queue runs dry.  Returns the number of packets processed.
 */
static int netcp_process_rx_packets(struct netcp_intf *netcp,
				    unsigned int budget)
{
	int done = 0;

	while (done < budget) {
		if (netcp_process_one_rx_packet(netcp))
			break;
		done++;
	}
	return done;
}
725
/* Release descriptors and attached buffers from Rx FDQ @fdq.
 * FDQ 0 holds primary (SKB head) buffers mapped with dma_map_single;
 * the other FDQs hold whole pages mapped with dma_map_page, so each
 * kind is unmapped and freed accordingly.
 */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *desc;
	unsigned int buf_len, dma_sz;
	dma_addr_t dma;
	void *buf_ptr;
	u32 tmp;

	/* Pop every free descriptor still queued on this FDQ */
	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			continue;
		}

		get_org_pkt_info(&dma, &buf_len, desc);
		/* NOTE(review): (u32 *) cast assumes 32-bit pointers in
		 * the descriptor pad words.
		 */
		get_pad_info((u32 *)&buf_ptr, &tmp, desc);

		if (unlikely(!dma)) {
			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (unlikely(!buf_ptr)) {
			dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (fdq == 0) {
			/* primary buffer: frag or kmalloc allocation */
			dma_unmap_single(netcp->dev, dma, buf_len,
					 DMA_FROM_DEVICE);
			netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
		} else {
			/* secondary buffer: whole page */
			dma_unmap_page(netcp->dev, dma, buf_len,
				       DMA_FROM_DEVICE);
			__free_page(buf_ptr);
		}

		knav_pool_desc_put(netcp->rx_pool, desc);
	}
}
771
772static void netcp_rxpool_free(struct netcp_intf *netcp)
773{
774 int i;
775
776 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
777 !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
778 netcp_free_rx_buf(netcp, i);
779
780 if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
781 dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
782 netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));
783
784 knav_pool_destroy(netcp->rx_pool);
785 netcp->rx_pool = NULL;
786}
787
788static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
789{
790 struct knav_dma_desc *hwdesc;
791 unsigned int buf_len, dma_sz;
792 u32 desc_info, pkt_info;
793 struct page *page;
794 dma_addr_t dma;
795 void *bufptr;
796 u32 pad[2];
797
798 /* Allocate descriptor */
799 hwdesc = knav_pool_desc_get(netcp->rx_pool);
800 if (IS_ERR_OR_NULL(hwdesc)) {
801 dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
802 return;
803 }
804
805 if (likely(fdq == 0)) {
806 unsigned int primary_buf_len;
807 /* Allocate a primary receive queue entry */
808 buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
809 primary_buf_len = SKB_DATA_ALIGN(buf_len) +
810 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
811
812 if (primary_buf_len <= PAGE_SIZE) {
813 bufptr = netdev_alloc_frag(primary_buf_len);
814 pad[1] = primary_buf_len;
815 } else {
816 bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
817 GFP_DMA32 | __GFP_COLD);
818 pad[1] = 0;
819 }
820
821 if (unlikely(!bufptr)) {
822 dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
823 goto fail;
824 }
825 dma = dma_map_single(netcp->dev, bufptr, buf_len,
826 DMA_TO_DEVICE);
827 pad[0] = (u32)bufptr;
828
829 } else {
830 /* Allocate a secondary receive queue entry */
831 page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
832 if (unlikely(!page)) {
833 dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
834 goto fail;
835 }
836 buf_len = PAGE_SIZE;
837 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
838 pad[0] = (u32)page;
839 pad[1] = 0;
840 }
841
842 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
843 desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
844 pkt_info = KNAV_DMA_DESC_HAS_EPIB;
845 pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
846 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
847 KNAV_DMA_DESC_RETQ_SHIFT;
848 set_org_pkt_info(dma, buf_len, hwdesc);
849 set_pad_info(pad[0], pad[1], hwdesc);
850 set_desc_info(desc_info, pkt_info, hwdesc);
851
852 /* Push to FDQs */
853 knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
854 &dma_sz);
855 knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
856 return;
857
858fail:
859 knav_pool_desc_put(netcp->rx_pool, hwdesc);
860}
861
862/* Refill Rx FDQ with descriptors & attached buffers */
863static void netcp_rxpool_refill(struct netcp_intf *netcp)
864{
865 u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
866 int i;
867
868 /* Calculate the FDQ deficit and refill */
869 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
870 fdq_deficit[i] = netcp->rx_queue_depths[i] -
871 knav_queue_get_count(netcp->rx_fdq[i]);
872
873 while (fdq_deficit[i]--)
874 netcp_allocate_rx_buf(netcp, i);
875 } /* end for fdqs */
876}
877
878/* NAPI poll */
879static int netcp_rx_poll(struct napi_struct *napi, int budget)
880{
881 struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
882 rx_napi);
883 unsigned int packets;
884
885 packets = netcp_process_rx_packets(netcp, budget);
886
887 if (packets < budget) {
888 napi_complete(&netcp->rx_napi);
889 knav_queue_enable_notify(netcp->rx_queue);
890 }
891
892 netcp_rxpool_refill(netcp);
893 return packets;
894}
895
896static void netcp_rx_notify(void *arg)
897{
898 struct netcp_intf *netcp = arg;
899
900 knav_queue_disable_notify(netcp->rx_queue);
901 napi_schedule(&netcp->rx_napi);
902}
903
/* Walk a Tx descriptor chain, unmapping each buffer and returning
 * every descriptor to the Tx pool.  @desc is the (already unmapped)
 * head; @desc_sz is the size used to unmap the chained descriptors.
 *
 * NOTE(review): fragment buffers are mapped with dma_map_page() in
 * netcp_tx_map_skb() but unmapped here with dma_unmap_single() --
 * confirm this direction/API mismatch is intentional.
 */
static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc,
				     unsigned int desc_sz)
{
	struct knav_dma_desc *ndesc = desc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len;

	while (ndesc) {
		get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

		if (dma_buf && buf_len)
			dma_unmap_single(netcp->dev, dma_buf, buf_len,
					 DMA_TO_DEVICE);
		else
			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n",
				 (void *)dma_buf, buf_len);

		/* Return this descriptor before following the link that
		 * was already extracted into dma_desc above.
		 */
		knav_pool_desc_put(netcp->tx_pool, ndesc);
		ndesc = NULL;
		if (dma_desc) {
			ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
						     desc_sz);
			if (!ndesc)
				dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
		}
	}
}
932
/* Reap up to @budget completed Tx descriptors: free each descriptor
 * chain, release the stashed skb, update stats, and wake the stopped
 * subqueue once the pool refills past tx_resume_threshold.
 * Returns the number of packets reaped.
 */
static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
					  unsigned int budget)
{
	struct knav_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int dma_sz;
	dma_addr_t dma;
	int pkts = 0;
	u32 tmp;

	while (budget--) {
		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
		if (!dma)
			break;
		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
			netcp->ndev->stats.tx_errors++;
			continue;
		}

		/* The skb pointer was stashed in the descriptor pad words
		 * at submit time (NOTE(review): assumes 32-bit pointers).
		 */
		get_pad_info((u32 *)&skb, &tmp, desc);
		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
		if (!skb) {
			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
			netcp->ndev->stats.tx_errors++;
			continue;
		}

		/* Restart the subqueue once enough descriptors are free */
		if (netif_subqueue_stopped(netcp->ndev, skb) &&
		    netif_running(netcp->ndev) &&
		    (knav_pool_count(netcp->tx_pool) >
		     netcp->tx_resume_threshold)) {
			u16 subqueue = skb_get_queue_mapping(skb);

			netif_wake_subqueue(netcp->ndev, subqueue);
		}

		netcp->ndev->stats.tx_packets++;
		netcp->ndev->stats.tx_bytes += skb->len;
		dev_kfree_skb(skb);
		pkts++;
	}
	return pkts;
}
978
979static int netcp_tx_poll(struct napi_struct *napi, int budget)
980{
981 int packets;
982 struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
983 tx_napi);
984
985 packets = netcp_process_tx_compl_packets(netcp, budget);
986 if (packets < budget) {
987 napi_complete(&netcp->tx_napi);
988 knav_queue_enable_notify(netcp->tx_compl_q);
989 }
990
991 return packets;
992}
993
994static void netcp_tx_notify(void *arg)
995{
996 struct netcp_intf *netcp = arg;
997
998 knav_queue_disable_notify(netcp->tx_compl_q);
999 napi_schedule(&netcp->tx_napi);
1000}
1001
/* Map an skb (linear head plus page fragments) into a chain of knav
 * DMA descriptors from the Tx pool.  Returns the head descriptor
 * (still unmapped, with the total packet length filled in) or NULL on
 * failure after unwinding all partial work.
 */
static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc, *ndesc, *pdesc;
	unsigned int pkt_len = skb_headlen(skb);
	struct device *dev = netcp->dev;
	dma_addr_t dma_addr;
	unsigned int dma_sz;
	int i;

	/* Map the linear buffer */
	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
	/* NOTE(review): tests the handle for zero instead of using
	 * dma_mapping_error() -- confirm 0 is an invalid handle here.
	 */
	if (unlikely(!dma_addr)) {
		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
		return NULL;
	}

	desc = knav_pool_desc_get(netcp->tx_pool);
	if (unlikely(IS_ERR_OR_NULL(desc))) {
		dev_err(netcp->ndev_dev, "out of TX desc\n");
		dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
		return NULL;
	}

	set_pkt_info(dma_addr, pkt_len, 0, desc);
	if (skb_is_nonlinear(skb)) {
		prefetchw(skb_shinfo(skb));
	} else {
		/* Linear skb: single descriptor, no chain */
		desc->next_desc = 0;
		goto upd_pkt_len;
	}

	pdesc = desc;

	/* Handle the case where skb is fragmented in pages */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		u32 page_offset = frag->page_offset;
		u32 buf_len = skb_frag_size(frag);
		dma_addr_t desc_dma;
		u32 pkt_info;

		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
					DMA_TO_DEVICE);
		if (unlikely(!dma_addr)) {
			dev_err(netcp->ndev_dev, "Failed to map skb page\n");
			goto free_descs;
		}

		ndesc = knav_pool_desc_get(netcp->tx_pool);
		if (unlikely(IS_ERR_OR_NULL(ndesc))) {
			dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
			dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
			goto free_descs;
		}

		/* Link the new descriptor into the previous one, then map
		 * the previous one now that its next_desc is final.
		 */
		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool,
						      (void *)ndesc);
		/* NOTE(review): pkt_info is computed but never written to
		 * the chained descriptor -- confirm whether it should be
		 * passed to set_pkt_info() below.
		 */
		pkt_info =
			(netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
				KNAV_DMA_DESC_RETQ_SHIFT;
		set_pkt_info(dma_addr, buf_len, 0, ndesc);
		set_words(&desc_dma, 1, &pdesc->next_desc);
		pkt_len += buf_len;
		if (pdesc != desc)
			knav_pool_desc_map(netcp->tx_pool, pdesc,
					   sizeof(*pdesc), &desc_dma, &dma_sz);
		pdesc = ndesc;
	}
	/* Map the last chained descriptor (the dma handle returned in
	 * dma_addr is not needed afterwards).
	 */
	if (pdesc != desc)
		knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
				   &dma_addr, &dma_sz);

	/* frag list based linkage is not supported for now. */
	if (skb_shinfo(skb)->frag_list) {
		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
		goto free_descs;
	}

upd_pkt_len:
	WARN_ON(pkt_len != skb->len);

	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
	set_words(&pkt_len, 1, &desc->desc_info);
	return desc;

free_descs:
	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	return NULL;
}
1093
/* Run the registered Tx hooks over a mapped packet so one of them can
 * claim it (by setting p_info.tx_pipe), finalize the descriptor's
 * EPIB/psdata/return-queue fields, and push it onto the claimed
 * pipe's DMA queue.
 *
 * Returns 0 on success, a hook's negative error (or NETDEV_TX_OK if
 * the hook returned a positive value), -ENXIO if no hook claimed the
 * packet, or -ENOMEM if the descriptor could not be mapped.
 */
static int netcp_tx_submit_skb(struct netcp_intf *netcp,
			       struct sk_buff *skb,
			       struct knav_dma_desc *desc)
{
	struct netcp_tx_pipe *tx_pipe = NULL;
	struct netcp_hook_list *tx_hook;
	struct netcp_packet p_info;
	u32 packet_info = 0;
	unsigned int dma_sz;
	dma_addr_t dma;
	int ret = 0;

	p_info.netcp = netcp;
	p_info.skb = skb;
	p_info.tx_pipe = NULL;
	p_info.psdata_len = 0;
	p_info.ts_context = NULL;
	p_info.txtstamp_complete = NULL;
	p_info.epib = desc->epib;
	p_info.psdata = desc->psdata;
	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(u32));

	/* Find out where to inject the packet for transmission */
	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
					&p_info);
		if (unlikely(ret != 0)) {
			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
				tx_hook->order, ret);
			ret = (ret < 0) ? ret : NETDEV_TX_OK;
			goto out;
		}
	}

	/* Make sure some TX hook claimed the packet */
	tx_pipe = p_info.tx_pipe;
	if (!tx_pipe) {
		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
		ret = -ENXIO;
		goto out;
	}

	/* update descriptor */
	if (p_info.psdata_len) {
		u32 *psdata = p_info.psdata;

		/* Hooks filled psdata from the end of the area; move it
		 * to the front where the hardware expects it.
		 * NOTE(review): the memmove length is in bytes while the
		 * source offset advances in u32 words -- confirm units.
		 */
		memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
			p_info.psdata_len);
		set_words(psdata, p_info.psdata_len, psdata);
		packet_info |=
			(p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
			KNAV_DMA_DESC_PSLEN_SHIFT;
	}

	packet_info |= KNAV_DMA_DESC_HAS_EPIB |
		((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
		KNAV_DMA_DESC_RETQ_SHIFT) |
		((tx_pipe->dma_psflags & KNAV_DMA_DESC_PSFLAG_MASK) <<
		KNAV_DMA_DESC_PSFLAG_SHIFT);

	set_words(&packet_info, 1, &desc->packet_info);
	/* Stash the skb pointer so the completion path can recover it
	 * (NOTE(review): assumes 32-bit pointers).
	 */
	set_words((u32 *)&skb, 1, &desc->pad[0]);

	/* submit packet descriptor */
	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
				 &dma_sz);
	if (unlikely(ret)) {
		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
		ret = -ENOMEM;
		goto out;
	}
	skb_tx_timestamp(skb);
	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
	return ret;
}
1171
/* ndo_start_xmit handler: pad short frames, map the skb into a
 * descriptor chain, run the Tx hooks and queue the packet; pauses the
 * subqueue when the Tx descriptor pool runs low.
 */
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	int subqueue = skb_get_queue_mapping(skb);
	struct knav_dma_desc *desc;
	int desc_count, ret = 0;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Hardware requires a minimum frame size; pad short packets */
	if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
		ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
		if (ret < 0) {
			/* If we get here, the skb has already been dropped */
			dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
				 ret);
			ndev->stats.tx_dropped++;
			/* NOTE(review): returns a negative errno from an
			 * ndo_start_xmit; the contract expects NETDEV_TX_OK
			 * or NETDEV_TX_BUSY -- confirm.
			 */
			return ret;
		}
		/* NOTE(review): skb_padto() does not grow skb->len;
		 * forcing it here without moving the tail pointer is
		 * unusual -- compare with skb_put_padto() semantics.
		 */
		skb->len = NETCP_MIN_PACKET_SIZE;
	}

	desc = netcp_tx_map_skb(skb, netcp);
	if (unlikely(!desc)) {
		/* Out of descriptors: pause the queue and drop */
		netif_stop_subqueue(ndev, subqueue);
		ret = -ENOBUFS;
		goto drop;
	}

	ret = netcp_tx_submit_skb(netcp, skb, desc);
	if (ret)
		goto drop;

	ndev->trans_start = jiffies;

	/* Check Tx pool count & stop subqueue if needed */
	desc_count = knav_pool_count(netcp->tx_pool);
	if (desc_count < netcp->tx_pause_threshold) {
		dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
		netif_stop_subqueue(ndev, subqueue);
	}
	return NETDEV_TX_OK;

drop:
	ndev->stats.tx_dropped++;
	if (desc)
		netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	dev_kfree_skb(skb);
	return ret;
}
1225
1226int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
1227{
1228 if (tx_pipe->dma_channel) {
1229 knav_dma_close_channel(tx_pipe->dma_channel);
1230 tx_pipe->dma_channel = NULL;
1231 }
1232 return 0;
1233}
1234EXPORT_SYMBOL_GPL(netcp_txpipe_close);
1235
1236int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
1237{
1238 struct device *dev = tx_pipe->netcp_device->device;
1239 struct knav_dma_cfg config;
1240 int ret = 0;
1241 u8 name[16];
1242
1243 memset(&config, 0, sizeof(config));
1244 config.direction = DMA_MEM_TO_DEV;
1245 config.u.tx.filt_einfo = false;
1246 config.u.tx.filt_pswords = false;
1247 config.u.tx.priority = DMA_PRIO_MED_L;
1248
1249 tx_pipe->dma_channel = knav_dma_open_channel(dev,
1250 tx_pipe->dma_chan_name, &config);
1251 if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) {
1252 dev_err(dev, "failed opening tx chan(%s)\n",
1253 tx_pipe->dma_chan_name);
1254 goto err;
1255 }
1256
1257 snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
1258 tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
1259 KNAV_QUEUE_SHARED);
1260 if (IS_ERR(tx_pipe->dma_queue)) {
1261 dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
1262 name, ret);
1263 ret = PTR_ERR(tx_pipe->dma_queue);
1264 goto err;
1265 }
1266
1267 dev_dbg(dev, "opened tx pipe %s\n", name);
1268 return 0;
1269
1270err:
1271 if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
1272 knav_dma_close_channel(tx_pipe->dma_channel);
1273 tx_pipe->dma_channel = NULL;
1274 return ret;
1275}
1276EXPORT_SYMBOL_GPL(netcp_txpipe_open);
1277
1278int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
1279 struct netcp_device *netcp_device,
1280 const char *dma_chan_name, unsigned int dma_queue_id)
1281{
1282 memset(tx_pipe, 0, sizeof(*tx_pipe));
1283 tx_pipe->netcp_device = netcp_device;
1284 tx_pipe->dma_chan_name = dma_chan_name;
1285 tx_pipe->dma_queue_id = dma_queue_id;
1286 return 0;
1287}
1288EXPORT_SYMBOL_GPL(netcp_txpipe_init);
1289
1290static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
1291 const u8 *addr,
1292 enum netcp_addr_type type)
1293{
1294 struct netcp_addr *naddr;
1295
1296 list_for_each_entry(naddr, &netcp->addr_list, node) {
1297 if (naddr->type != type)
1298 continue;
1299 if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
1300 continue;
1301 return naddr;
1302 }
1303
1304 return NULL;
1305}
1306
1307static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
1308 const u8 *addr,
1309 enum netcp_addr_type type)
1310{
1311 struct netcp_addr *naddr;
1312
1313 naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
1314 if (!naddr)
1315 return NULL;
1316
1317 naddr->type = type;
1318 naddr->flags = 0;
1319 naddr->netcp = netcp;
1320 if (addr)
1321 ether_addr_copy(naddr->addr, addr);
1322 else
1323 memset(naddr->addr, 0, ETH_ALEN);
1324 list_add_tail(&naddr->node, &netcp->addr_list);
1325
1326 return naddr;
1327}
1328
/* Unlink a tracked address entry from the interface list and free it. */
static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
	list_del(&naddr->node);
	devm_kfree(netcp->dev, naddr);
}
1334
1335static void netcp_addr_clear_mark(struct netcp_intf *netcp)
1336{
1337 struct netcp_addr *naddr;
1338
1339 list_for_each_entry(naddr, &netcp->addr_list, node)
1340 naddr->flags = 0;
1341}
1342
1343static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
1344 enum netcp_addr_type type)
1345{
1346 struct netcp_addr *naddr;
1347
1348 naddr = netcp_addr_find(netcp, addr, type);
1349 if (naddr) {
1350 naddr->flags |= ADDR_VALID;
1351 return;
1352 }
1353
1354 naddr = netcp_addr_add(netcp, addr, type);
1355 if (!WARN_ON(!naddr))
1356 naddr->flags |= ADDR_NEW;
1357}
1358
/* Sweep phase (deletions): every tracked address that was not marked
 * during the current set_rx_mode() pass (neither ADDR_VALID nor
 * ADDR_NEW) is pushed to each module's del_addr callback and then
 * removed from the local list.
 */
static void netcp_addr_sweep_del(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (naddr->flags & (ADDR_VALID | ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
			naddr->addr, naddr->type);
		/* Serialize against module (un)registration */
		mutex_lock(&netcp_modules_lock);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->del_addr)
				continue;
			error = module->del_addr(priv->module_priv,
						 naddr);
			WARN_ON(error);
		}
		mutex_unlock(&netcp_modules_lock);
		netcp_addr_del(netcp, naddr);
	}
}
1384
/* Sweep phase (additions): every address freshly created during the
 * current set_rx_mode() pass (flagged ADDR_NEW) is pushed to each
 * module's add_addr callback.
 */
static void netcp_addr_sweep_add(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (!(naddr->flags & ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
			naddr->addr, naddr->type);
		/* Serialize against module (un)registration */
		mutex_lock(&netcp_modules_lock);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->add_addr)
				continue;
			error = module->add_addr(priv->module_priv, naddr);
			WARN_ON(error);
		}
		mutex_unlock(&netcp_modules_lock);
	}
}
1408
/* ndo_set_rx_mode handler: diff the netdev's current address lists
 * against the driver's tracked set using mark-and-sweep, then push
 * additions and removals down to the modules.
 *
 * NOTE(review): the net core calls ndo_set_rx_mode with the address
 * list lock held (atomic context), while the sweep helpers below take
 * netcp_modules_lock (a mutex) -- confirm this call path cannot
 * sleep-in-atomic.
 */
static void netcp_set_rx_mode(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netdev_hw_addr *ndev_addr;
	bool promisc;

	/* Fall back to promiscuous matching when asked, or when the
	 * multicast list exceeds what the hardware can filter.
	 */
	promisc = (ndev->flags & IFF_PROMISC ||
		   ndev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);

	/* first clear all marks */
	netcp_addr_clear_mark(netcp);

	/* next add new entries, mark existing ones */
	netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
	for_each_dev_addr(ndev, ndev_addr)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
	netdev_for_each_uc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
	netdev_for_each_mc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);

	if (promisc)
		netcp_addr_add_mark(netcp, NULL, ADDR_ANY);

	/* finally sweep and callout into modules */
	netcp_addr_sweep_del(netcp);
	netcp_addr_sweep_add(netcp);
}
1438
/* Release every Knav (Navigator) resource owned by this interface: the
 * Rx DMA channel, the Rx descriptor pool, the Rx/Tx completion queues,
 * the Rx free-descriptor queues and the Tx pool.  Each handle is
 * checked before release, so this is safe on a partially initialized
 * interface and doubles as the unwind path for
 * netcp_setup_navigator_resources().
 */
static void netcp_free_navigator_resources(struct netcp_intf *netcp)
{
	int i;

	if (netcp->rx_channel) {
		knav_dma_close_channel(netcp->rx_channel);
		netcp->rx_channel = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->rx_pool))
		netcp_rxpool_free(netcp);

	if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
		knav_queue_close(netcp->rx_queue);
		netcp->rx_queue = NULL;
	}

	/* FDQs are opened in order, so stop at the first unopened slot */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
		knav_queue_close(netcp->rx_fdq[i]);
		netcp->rx_fdq[i] = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		knav_queue_close(netcp->tx_compl_q);
		netcp->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
		knav_pool_destroy(netcp->tx_pool);
		netcp->tx_pool = NULL;
	}
}
1472
1473static int netcp_setup_navigator_resources(struct net_device *ndev)
1474{
1475 struct netcp_intf *netcp = netdev_priv(ndev);
1476 struct knav_queue_notify_config notify_cfg;
1477 struct knav_dma_cfg config;
1478 u32 last_fdq = 0;
1479 u8 name[16];
1480 int ret;
1481 int i;
1482
1483 /* Create Rx/Tx descriptor pools */
1484 snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
1485 netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
1486 netcp->rx_pool_region_id);
1487 if (IS_ERR_OR_NULL(netcp->rx_pool)) {
1488 dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
1489 ret = PTR_ERR(netcp->rx_pool);
1490 goto fail;
1491 }
1492
1493 snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
1494 netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
1495 netcp->tx_pool_region_id);
1496 if (IS_ERR_OR_NULL(netcp->tx_pool)) {
1497 dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
1498 ret = PTR_ERR(netcp->tx_pool);
1499 goto fail;
1500 }
1501
1502 /* open Tx completion queue */
1503 snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
1504 netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
1505 if (IS_ERR_OR_NULL(netcp->tx_compl_q)) {
1506 ret = PTR_ERR(netcp->tx_compl_q);
1507 goto fail;
1508 }
1509 netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);
1510
1511 /* Set notification for Tx completion */
1512 notify_cfg.fn = netcp_tx_notify;
1513 notify_cfg.fn_arg = netcp;
1514 ret = knav_queue_device_control(netcp->tx_compl_q,
1515 KNAV_QUEUE_SET_NOTIFIER,
1516 (unsigned long)&notify_cfg);
1517 if (ret)
1518 goto fail;
1519
1520 knav_queue_disable_notify(netcp->tx_compl_q);
1521
1522 /* open Rx completion queue */
1523 snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
1524 netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
1525 if (IS_ERR_OR_NULL(netcp->rx_queue)) {
1526 ret = PTR_ERR(netcp->rx_queue);
1527 goto fail;
1528 }
1529 netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);
1530
1531 /* Set notification for Rx completion */
1532 notify_cfg.fn = netcp_rx_notify;
1533 notify_cfg.fn_arg = netcp;
1534 ret = knav_queue_device_control(netcp->rx_queue,
1535 KNAV_QUEUE_SET_NOTIFIER,
1536 (unsigned long)&notify_cfg);
1537 if (ret)
1538 goto fail;
1539
1540 knav_queue_disable_notify(netcp->rx_queue);
1541
1542 /* open Rx FDQs */
1543 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
1544 netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
1545 snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
1546 netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
1547 if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
1548 ret = PTR_ERR(netcp->rx_fdq[i]);
1549 goto fail;
1550 }
1551 }
1552
1553 memset(&config, 0, sizeof(config));
1554 config.direction = DMA_DEV_TO_MEM;
1555 config.u.rx.einfo_present = true;
1556 config.u.rx.psinfo_present = true;
1557 config.u.rx.err_mode = DMA_DROP;
1558 config.u.rx.desc_type = DMA_DESC_HOST;
1559 config.u.rx.psinfo_at_sop = false;
1560 config.u.rx.sop_offset = NETCP_SOP_OFFSET;
1561 config.u.rx.dst_q = netcp->rx_queue_id;
1562 config.u.rx.thresh = DMA_THRESH_NONE;
1563
1564 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
1565 if (netcp->rx_fdq[i])
1566 last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
1567 config.u.rx.fdq[i] = last_fdq;
1568 }
1569
1570 netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
1571 netcp->dma_chan_name, &config);
1572 if (IS_ERR_OR_NULL(netcp->rx_channel)) {
1573 dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n",
1574 netcp->dma_chan_name);
1575 goto fail;
1576 }
1577
1578 dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
1579 return 0;
1580
1581fail:
1582 netcp_free_navigator_resources(netcp);
1583 return ret;
1584}
1585
/* Open the device (ndo_open): set up Navigator resources, open every
 * attached module, prime the Rx buffer pool, then enable NAPI, queue
 * notifications and the Tx queues.
 *
 * Return: 0 on success, negative errno otherwise (all acquired
 * resources are released on failure).
 */
static int netcp_ndo_open(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret;

	netif_carrier_off(ndev);
	ret = netcp_setup_navigator_resources(ndev);
	if (ret) {
		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
		goto fail;
	}

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->open) {
			ret = module->open(intf_modpriv->module_priv, ndev);
			if (ret != 0) {
				dev_err(netcp->ndev_dev, "module open failed\n");
				goto fail_open;
			}
		}
	}
	mutex_unlock(&netcp_modules_lock);

	netcp_rxpool_refill(netcp);
	napi_enable(&netcp->rx_napi);
	napi_enable(&netcp->tx_napi);
	/* notifications were installed disabled during setup */
	knav_queue_enable_notify(netcp->tx_compl_q);
	knav_queue_enable_notify(netcp->rx_queue);
	netif_tx_wake_all_queues(ndev);
	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
	return 0;

fail_open:
	/* netcp_modules_lock is still held here.  close() is called on
	 * every module with a handler, including ones never opened --
	 * NOTE(review): assumes module->close tolerates that; confirm.
	 */
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close)
			module->close(intf_modpriv->module_priv, ndev);
	}
	mutex_unlock(&netcp_modules_lock);

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}
1635
/* Close the device (ndo_stop): quiesce Tx, purge the address table,
 * disable notifications and NAPI, close all modules, drain both
 * completion queues and release the Navigator resources.
 * Always returns 0; module close failures are only logged.
 */
static int netcp_ndo_stop(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	/* clearing every mark makes the sweep delete all addresses */
	netcp_addr_clear_mark(netcp);
	netcp_addr_sweep_del(netcp);
	knav_queue_disable_notify(netcp->rx_queue);
	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_disable(&netcp->rx_napi);
	napi_disable(&netcp->tx_napi);

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close) {
			err = module->close(intf_modpriv->module_priv, ndev);
			if (err != 0)
				dev_err(netcp->ndev_dev, "Close failed\n");
		}
	}
	mutex_unlock(&netcp_modules_lock);

	/* Recycle Rx descriptors from completion queue */
	netcp_empty_rx_queue(netcp);

	/* Recycle Tx descriptors from completion queue */
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);

	/* any descriptor still missing from the pool has leaked */
	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));

	netcp_free_navigator_resources(netcp);
	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
	return 0;
}
1678
/* ndo_do_ioctl handler: fan the request out to every module that
 * implements ->ioctl.  A real error (anything other than -EOPNOTSUPP)
 * aborts the walk and is returned; if at least one module handled the
 * request (returned 0) the ioctl succeeds; otherwise the last error
 * (initially -EOPNOTSUPP when no module has an ioctl op) is returned.
 */
static int netcp_ndo_ioctl(struct net_device *ndev,
			   struct ifreq *req, int cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	/* ret == 0 records "some module handled it"; -1 means none did */
	int ret = -1, err = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (!module->ioctl)
			continue;

		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
		if ((err < 0) && (err != -EOPNOTSUPP)) {
			ret = err;
			goto out;
		}
		if (err == 0)
			ret = err;
	}

out:
	mutex_unlock(&netcp_modules_lock);
	return (ret == 0) ? 0 : err;
}
1709
1710static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu)
1711{
1712 struct netcp_intf *netcp = netdev_priv(ndev);
1713
1714 /* MTU < 68 is an error for IPv4 traffic */
1715 if ((new_mtu < 68) ||
1716 (new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) {
1717 dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu);
1718 return -EINVAL;
1719 }
1720
1721 ndev->mtu = new_mtu;
1722 return 0;
1723}
1724
1725static void netcp_ndo_tx_timeout(struct net_device *ndev)
1726{
1727 struct netcp_intf *netcp = netdev_priv(ndev);
1728 unsigned int descs = knav_pool_count(netcp->tx_pool);
1729
1730 dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
1731 netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
1732 ndev->trans_start = jiffies;
1733 netif_tx_wake_all_queues(ndev);
1734}
1735
1736static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
1737{
1738 struct netcp_intf *netcp = netdev_priv(ndev);
1739 struct netcp_intf_modpriv *intf_modpriv;
1740 struct netcp_module *module;
1741 int err = 0;
1742
1743 dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
1744
1745 mutex_lock(&netcp_modules_lock);
1746 for_each_module(netcp, intf_modpriv) {
1747 module = intf_modpriv->netcp_module;
1748 if ((module->add_vid) && (vid != 0)) {
1749 err = module->add_vid(intf_modpriv->module_priv, vid);
1750 if (err != 0) {
1751 dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
1752 vid);
1753 break;
1754 }
1755 }
1756 }
1757 mutex_unlock(&netcp_modules_lock);
1758 return err;
1759}
1760
1761static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
1762{
1763 struct netcp_intf *netcp = netdev_priv(ndev);
1764 struct netcp_intf_modpriv *intf_modpriv;
1765 struct netcp_module *module;
1766 int err = 0;
1767
1768 dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
1769
1770 mutex_lock(&netcp_modules_lock);
1771 for_each_module(netcp, intf_modpriv) {
1772 module = intf_modpriv->netcp_module;
1773 if (module->del_vid) {
1774 err = module->del_vid(intf_modpriv->module_priv, vid);
1775 if (err != 0) {
1776 dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
1777 vid);
1778 break;
1779 }
1780 }
1781 }
1782 mutex_unlock(&netcp_modules_lock);
1783 return err;
1784}
1785
/* ndo_select_queue: the netdev is created with a single Tx queue
 * (alloc_etherdev_mqs(..., 1, 1) in netcp_create_interface), so all
 * traffic maps to queue 0.
 */
static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
			      void *accel_priv,
			      select_queue_fallback_t fallback)
{
	return 0;
}
1792
1793static int netcp_setup_tc(struct net_device *dev, u8 num_tc)
1794{
1795 int i;
1796
1797 /* setup tc must be called under rtnl lock */
1798 ASSERT_RTNL();
1799
1800 /* Sanity-check the number of traffic classes requested */
1801 if ((dev->real_num_tx_queues <= 1) ||
1802 (dev->real_num_tx_queues < num_tc))
1803 return -EINVAL;
1804
1805 /* Configure traffic class to queue mappings */
1806 if (num_tc) {
1807 netdev_set_num_tc(dev, num_tc);
1808 for (i = 0; i < num_tc; i++)
1809 netdev_set_tc_queue(dev, i, 1, i);
1810 } else {
1811 netdev_reset_tc(dev);
1812 }
1813
1814 return 0;
1815}
1816
/* net_device_ops shared by all NETCP interfaces; installed on each
 * netdev in netcp_create_interface().
 */
static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open = netcp_ndo_open,
	.ndo_stop = netcp_ndo_stop,
	.ndo_start_xmit = netcp_ndo_start_xmit,
	.ndo_set_rx_mode = netcp_set_rx_mode,
	.ndo_do_ioctl = netcp_ndo_ioctl,
	.ndo_change_mtu = netcp_ndo_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid = netcp_rx_kill_vid,
	.ndo_tx_timeout = netcp_ndo_tx_timeout,
	.ndo_select_queue = netcp_select_queue,
	.ndo_setup_tc = netcp_setup_tc,
};
1832
/* Allocate and initialize one NETCP interface from a child of the
 * "netcp-interfaces" DT node: create the netdev, choose a MAC address
 * (e-fuse, devicetree, or random), parse the DMA/queue/pool properties,
 * add the NAPI contexts and link the interface onto the device list.
 *
 * NOTE(review): register_netdev() is not called here -- presumably the
 * netdev is registered when a module attaches; confirm against the rest
 * of the file.
 *
 * Return: 0 on success, negative errno on failure (netdev is freed).
 */
static int netcp_create_interface(struct netcp_device *netcp_device,
				  struct device_node *node_interface)
{
	struct device *dev = netcp_device->device;
	struct device_node *node = dev->of_node;
	struct netcp_intf *netcp;
	struct net_device *ndev;
	resource_size_t size;
	struct resource res;
	void __iomem *efuse = NULL;
	u32 efuse_mac = 0;
	const void *mac_addr;
	u8 efuse_mac_addr[6];
	u32 temp[2];
	int ret = 0;

	/* single Tx queue, single Rx queue */
	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
	if (!ndev) {
		dev_err(dev, "Error allocating netdev\n");
		return -ENOMEM;
	}

	ndev->features |= NETIF_F_SG;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features = ndev->features;
	ndev->vlan_features |= NETIF_F_SG;

	netcp = netdev_priv(ndev);
	spin_lock_init(&netcp->lock);
	INIT_LIST_HEAD(&netcp->module_head);
	INIT_LIST_HEAD(&netcp->txhook_list_head);
	INIT_LIST_HEAD(&netcp->rxhook_list_head);
	INIT_LIST_HEAD(&netcp->addr_list);
	netcp->netcp_device = netcp_device;
	netcp->dev = netcp_device->device;
	netcp->ndev = ndev;
	netcp->ndev_dev = &ndev->dev;
	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
	netcp->node_interface = node_interface;

	/* MAC address: prefer the e-fuse block when "efuse-mac" is set */
	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
	if (efuse_mac) {
		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
			dev_err(dev, "could not find efuse-mac reg resource\n");
			ret = -ENODEV;
			goto quit;
		}
		size = resource_size(&res);

		if (!devm_request_mem_region(dev, res.start, size,
					     dev_name(dev))) {
			dev_err(dev, "could not reserve resource\n");
			ret = -ENOMEM;
			goto quit;
		}

		efuse = devm_ioremap_nocache(dev, res.start, size);
		if (!efuse) {
			dev_err(dev, "could not map resource\n");
			devm_release_mem_region(dev, res.start, size);
			ret = -ENOMEM;
			goto quit;
		}

		emac_arch_get_mac_addr(efuse_mac_addr, efuse);
		if (is_valid_ether_addr(efuse_mac_addr))
			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
		else
			random_ether_addr(ndev->dev_addr);

		/* the e-fuse mapping is only needed during probe */
		devm_iounmap(dev, efuse);
		devm_release_mem_region(dev, res.start, size);
	} else {
		/* otherwise take the address from DT, or randomize */
		mac_addr = of_get_mac_address(node_interface);
		if (mac_addr)
			ether_addr_copy(ndev->dev_addr, mac_addr);
		else
			random_ether_addr(ndev->dev_addr);
	}

	ret = of_property_read_string(node_interface, "rx-channel",
				      &netcp->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	/* optional properties fall back to defaults with a warning */
	ret = of_property_read_u32(node_interface, "rx-queue",
				   &netcp->rx_queue_id);
	if (ret < 0) {
		dev_warn(dev, "missing \"rx-queue\" parameter\n");
		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
	}

	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
					 netcp->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
		netcp->rx_queue_depths[0] = 128;
	}

	ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
					 netcp->rx_buffer_sizes,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret) {
		dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
		netcp->rx_buffer_sizes[0] = 1536;
	}

	/* rx-pool / tx-pool are (size, region-id) pairs */
	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->rx_pool_size = temp[0];
	netcp->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->tx_pool_size = temp[0];
	netcp->tx_pool_region_id = temp[1];

	/* need at least one descriptor per possible skb fragment */
	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
		dev_err(dev, "tx-pool size too small, must be atleast(%ld)\n",
			MAX_SKB_FRAGS);
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "tx-completion-queue",
				   &netcp->tx_compl_qid);
	if (ret < 0) {
		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
	}

	/* NAPI register */
	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
	netif_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);

	/* Register the network device */
	ndev->dev_id = 0;
	ndev->watchdog_timeo = NETCP_TX_TIMEOUT;
	ndev->netdev_ops = &netcp_netdev_ops;
	SET_NETDEV_DEV(ndev, dev);

	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
	return 0;

quit:
	free_netdev(ndev);
	return ret;
}
1995
/* Tear down one interface: give every attached module a chance to
 * release its per-interface state, then unlink, unregister and free
 * the netdev.
 */
static void netcp_delete_interface(struct netcp_device *netcp_device,
				   struct net_device *ndev)
{
	struct netcp_intf_modpriv *intf_modpriv, *tmp;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_module *module;

	dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
		ndev->name);

	/* Notify each of the modules that the interface is going away */
	list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
				 intf_list) {
		module = intf_modpriv->netcp_module;
		dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
			module->name);
		if (module->release)
			module->release(intf_modpriv->module_priv);
		list_del(&intf_modpriv->intf_list);
		kfree(intf_modpriv);
	}
	WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
	     ndev->name);

	list_del(&netcp->interface_list);

	of_node_put(netcp->node_interface);
	unregister_netdev(ndev);
	/* NOTE(review): only rx_napi is deleted explicitly; presumably
	 * free_netdev() cleans up tx_napi as well -- confirm.
	 */
	netif_napi_del(&netcp->rx_napi);
	free_netdev(ndev);
}
2027
2028static int netcp_probe(struct platform_device *pdev)
2029{
2030 struct device_node *node = pdev->dev.of_node;
2031 struct netcp_intf *netcp_intf, *netcp_tmp;
2032 struct device_node *child, *interfaces;
2033 struct netcp_device *netcp_device;
2034 struct device *dev = &pdev->dev;
2035 struct netcp_module *module;
2036 int ret;
2037
2038 if (!node) {
2039 dev_err(dev, "could not find device info\n");
2040 return -ENODEV;
2041 }
2042
2043 /* Allocate a new NETCP device instance */
2044 netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
2045 if (!netcp_device)
2046 return -ENOMEM;
2047
2048 pm_runtime_enable(&pdev->dev);
2049 ret = pm_runtime_get_sync(&pdev->dev);
2050 if (ret < 0) {
2051 dev_err(dev, "Failed to enable NETCP power-domain\n");
2052 pm_runtime_disable(&pdev->dev);
2053 return ret;
2054 }
2055
2056 /* Initialize the NETCP device instance */
2057 INIT_LIST_HEAD(&netcp_device->interface_head);
2058 INIT_LIST_HEAD(&netcp_device->modpriv_head);
2059 netcp_device->device = dev;
2060 platform_set_drvdata(pdev, netcp_device);
2061
2062 /* create interfaces */
2063 interfaces = of_get_child_by_name(node, "netcp-interfaces");
2064 if (!interfaces) {
2065 dev_err(dev, "could not find netcp-interfaces node\n");
2066 ret = -ENODEV;
2067 goto probe_quit;
2068 }
2069
2070 for_each_available_child_of_node(interfaces, child) {
2071 ret = netcp_create_interface(netcp_device, child);
2072 if (ret) {
2073 dev_err(dev, "could not create interface(%s)\n",
2074 child->name);
2075 goto probe_quit_interface;
2076 }
2077 }
2078
2079 /* Add the device instance to the list */
2080 list_add_tail(&netcp_device->device_list, &netcp_devices);
2081
2082 /* Probe & attach any modules already registered */
2083 mutex_lock(&netcp_modules_lock);
2084 for_each_netcp_module(module) {
2085 ret = netcp_module_probe(netcp_device, module);
2086 if (ret < 0)
2087 dev_err(dev, "module(%s) probe failed\n", module->name);
2088 }
2089 mutex_unlock(&netcp_modules_lock);
2090 return 0;
2091
2092probe_quit_interface:
2093 list_for_each_entry_safe(netcp_intf, netcp_tmp,
2094 &netcp_device->interface_head,
2095 interface_list) {
2096 netcp_delete_interface(netcp_device, netcp_intf->ndev);
2097 }
2098
2099probe_quit:
2100 pm_runtime_put_sync(&pdev->dev);
2101 pm_runtime_disable(&pdev->dev);
2102 platform_set_drvdata(pdev, NULL);
2103 return ret;
2104}
2105
/* Platform remove: detach every module instance from the device, then
 * undo the runtime-PM and drvdata setup done in netcp_probe().
 * NOTE(review): interfaces are expected to be gone by the time the
 * modules are removed; the WARN below catches leftovers.
 */
static int netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
		kfree(inst_modpriv);
	}
	WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
	     pdev->name);

	devm_kfree(&pdev->dev, netcp_device);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
2129
2130static struct of_device_id of_match[] = {
2131 { .compatible = "ti,netcp-1.0", },
2132 {},
2133};
2134MODULE_DEVICE_TABLE(of, of_match);
2135
/* Platform driver glue; matched via the "ti,netcp-1.0" compatible
 * string in of_match.
 */
static struct platform_driver netcp_driver = {
	.driver = {
		.name = "netcp-1.0",
		.owner = THIS_MODULE,
		.of_match_table = of_match,
	},
	.probe = netcp_probe,
	.remove = netcp_remove,
};
2145module_platform_driver(netcp_driver);
2146
2147MODULE_LICENSE("GPL v2");
2148MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
2149MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com");
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
new file mode 100644
index 000000000000..84f5ce525750
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -0,0 +1,2159 @@
1/*
2 * Keystone GBE and XGBE subsystem code
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated
5 * Authors: Sandeep Nair <sandeep_n@ti.com>
6 * Sandeep Paulraj <s-paulraj@ti.com>
7 * Cyril Chemparathy <cyril@ti.com>
8 * Santosh Shilimkar <santosh.shilimkar@ti.com>
9 * Wingman Kwok <w-kwok2@ti.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation version 2.
14 *
15 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
16 * kind, whether express or implied; without even the implied warranty
17 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21#include <linux/io.h>
22#include <linux/module.h>
23#include <linux/of_mdio.h>
24#include <linux/of_address.h>
25#include <linux/if_vlan.h>
26#include <linux/ethtool.h>
27
28#include "cpsw_ale.h"
29#include "netcp.h"
30
#define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
#define NETCP_DRIVER_VERSION		"v1.0"

/* Fields of the subsystem id_ver register.  Macro arguments and
 * expansions are fully parenthesized (hygiene fix: the originals relied
 * on operator precedence and unparenthesized arguments).
 */
#define GBE_IDENT(reg)			(((reg) >> 16) & 0xffff)
#define GBE_MAJOR_VERSION(reg)		(((reg) >> 8) & 0x7)
#define GBE_MINOR_VERSION(reg)		((reg) & 0xff)
#define GBE_RTL_VERSION(reg)		(((reg) >> 11) & 0x1f)

/* 1G Ethernet SS defines */
#define GBE_MODULE_NAME			"netcp-gbe"
#define GBE_SS_VERSION_14		0x4ed21104

/* byte offsets of the sub-modules within the GBE register space */
#define GBE13_SGMII_MODULE_OFFSET	0x100
#define GBE13_SGMII34_MODULE_OFFSET	0x400
#define GBE13_SWITCH_MODULE_OFFSET	0x800
#define GBE13_HOST_PORT_OFFSET		0x834
#define GBE13_SLAVE_PORT_OFFSET		0x860
#define GBE13_EMAC_OFFSET		0x900
#define GBE13_SLAVE_PORT2_OFFSET	0xa00
#define GBE13_HW_STATS_OFFSET		0xb00
#define GBE13_ALE_OFFSET		0xe00
#define GBE13_HOST_PORT_NUM		0
#define GBE13_NUM_SLAVES		4
#define GBE13_NUM_ALE_PORTS		(GBE13_NUM_SLAVES + 1)
#define GBE13_NUM_ALE_ENTRIES		1024

/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME		"netcp-xgbe"
#define XGBE_SS_VERSION_10		0x4ee42100

#define XGBE_SERDES_REG_INDEX		1
#define XGBE10_SGMII_MODULE_OFFSET	0x100
#define XGBE10_SWITCH_MODULE_OFFSET	0x1000
#define XGBE10_HOST_PORT_OFFSET		0x1034
#define XGBE10_SLAVE_PORT_OFFSET	0x1064
#define XGBE10_EMAC_OFFSET		0x1400
#define XGBE10_ALE_OFFSET		0x1700
#define XGBE10_HW_STATS_OFFSET		0x1800
#define XGBE10_HOST_PORT_NUM		0
#define XGBE10_NUM_SLAVES		2
#define XGBE10_NUM_ALE_PORTS		(XGBE10_NUM_SLAVES + 1)
#define XGBE10_NUM_ALE_ENTRIES		1024

#define GBE_TIMER_INTERVAL		(HZ / 2)

/* Soft reset register values */
#define SOFT_RESET_MASK				BIT(0)
#define SOFT_RESET				BIT(0)
#define DEVICE_EMACSL_RESET_POLL_COUNT		100
#define GMACSL_RET_WARN_RESET_INCOMPLETE	(-2)

/* MAC sliver (EMAC) mac_control bits */
#define MACSL_RX_ENABLE_CSF		BIT(23)
#define MACSL_ENABLE_EXT_CTL		BIT(18)
#define MACSL_XGMII_ENABLE		BIT(13)
#define MACSL_XGIG_MODE			BIT(8)
#define MACSL_GIG_MODE			BIT(7)
#define MACSL_GMII_ENABLE		BIT(5)
#define MACSL_FULLDUPLEX		BIT(0)

#define GBE_CTL_P0_ENABLE		BIT(2)
#define GBE_REG_VAL_STAT_ENABLE_ALL	0xff
#define XGBE_REG_VAL_STAT_ENABLE_ALL	0xf
#define GBE_STATS_CD_SEL		BIT(28)

#define GBE_PORT_MASK(x)		(BIT(x) - 1)
#define GBE_MASK_NO_PORTS		0

#define GBE_DEF_1G_MAC_CONTROL					\
		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
		 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

#define GBE_DEF_10G_MAC_CONTROL					\
		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |		\
		 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)

#define GBE_STATSA_MODULE		0
#define GBE_STATSB_MODULE		1
#define GBE_STATSC_MODULE		2
#define GBE_STATSD_MODULE		3

#define XGBE_STATS0_MODULE		0
#define XGBE_STATS1_MODULE		1
#define XGBE_STATS2_MODULE		2

#define MAX_SLAVES			GBE13_NUM_SLAVES
/* s: 0-based slave_port; ports 0/1 and 2/3 live in separate SGMII blocks */
#define SGMII_BASE(s) \
	(((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)

#define GBE_TX_QUEUE			648
#define GBE_TXHOOK_ORDER		0
#define GBE_DEFAULT_ALE_AGEOUT		30
#define SLAVE_LINK_IS_XGMII(s)	((s)->link_interface >= XGMII_LINK_MAC_PHY)
#define NETCP_LINK_STATE_INVALID	(-1)

/* Version-independent register accessors: the *_ofs structs record each
 * register's byte offset within its block so common code can address
 * both the GBE and XGBE layouts.  (hygiene fix: 'p' parenthesized)
 */
#define GBE_SET_REG_OFS(p, rb, rn) ((p)->rb##_ofs.rn = \
		offsetof(struct gbe##_##rb, rn))
#define XGBE_SET_REG_OFS(p, rb, rn) ((p)->rb##_ofs.rn = \
		offsetof(struct xgbe##_##rb, rn))
#define GBE_REG_ADDR(p, rb, rn) ((p)->rb + (p)->rb##_ofs.rn)
131
/* The structs below mirror hardware register layouts (accessed through
 * the __iomem pointers in struct gbe_priv); field order and widths are
 * significant and must not change.
 */

/* XGBE (10G) subsystem-level registers */
struct xgbe_ss_regs {
	u32 id_ver;
	u32 synce_count;
	u32 synce_mux;
	u32 control;
};

/* XGBE switch module registers */
struct xgbe_switch_regs {
	u32 id_ver;
	u32 control;
	u32 emcontrol;
	u32 stat_port_en;
	u32 ptype;
	u32 soft_idle;
	u32 thru_rate;
	u32 gap_thresh;
	u32 tx_start_wds;
	u32 flow_control;
	u32 cppi_thresh;
};

/* XGBE per-slave-port registers */
struct xgbe_port_regs {
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 sa_lo;
	u32 sa_hi;
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
	u32 ts_ctl_ltype2;
	u32 ts_ctl2;
	u32 control;
};

/* XGBE host (CPU-facing) port registers */
struct xgbe_host_port_regs {
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 src_id;
	u32 rx_pri_map;
	u32 rx_maxlen;
};

/* XGBE per-port EMAC (MAC sliver) registers */
struct xgbe_emac_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 em_control;
	u32 __reserved_1;
	u32 tx_gap;
	u32 rsvd[4];
};

/* XGBE host-port hardware statistics counters */
struct xgbe_host_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 __rsvd_0[3];
	u32 rx_oversized_frames;
	u32 __rsvd_1;
	u32 rx_undersized_frames;
	u32 __rsvd_2;
	u32 overrun_type4;
	u32 overrun_type5;
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 __rsvd_3[9];
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};

/* XGBE slave-port hardware statistics counters */
struct xgbe_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_crc_errors;
	u32 rx_align_code_errors;
	u32 rx_oversized_frames;
	u32 rx_jabber_frames;
	u32 rx_undersized_frames;
	u32 rx_fragments;
	u32 overrun_type4;
	u32 overrun_type5;
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_deferred_frames;
	u32 tx_collision_frames;
	u32 tx_single_coll_frames;
	u32 tx_mult_coll_frames;
	u32 tx_excessive_collisions;
	u32 tx_late_collisions;
	u32 tx_underrun;
	u32 tx_carrier_sense_errors;
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};

/* number of u32 counters in one XGBE stats module */
#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
260
/* GBE (1G) subsystem-level registers; hardware layout, do not reorder */
struct gbe_ss_regs {
	u32 id_ver;
	u32 synce_count;
	u32 synce_mux;
};

/* Byte offsets for version-dependent registers, filled in via
 * GBE_SET_REG_OFS()/XGBE_SET_REG_OFS() and read via GBE_REG_ADDR().
 */
struct gbe_ss_regs_ofs {
	u16 id_ver;
	u16 control;
};

/* GBE switch module registers */
struct gbe_switch_regs {
	u32 id_ver;
	u32 control;
	u32 soft_reset;
	u32 stat_port_en;
	u32 ptype;
	u32 soft_idle;
	u32 thru_rate;
	u32 gap_thresh;
	u32 tx_start_wds;
	u32 flow_control;
};

/* switch register offsets (covers both GBE and XGBE layouts) */
struct gbe_switch_regs_ofs {
	u16 id_ver;
	u16 control;
	u16 soft_reset;
	u16 emcontrol;
	u16 stat_port_en;
	u16 ptype;
	u16 flow_control;
};

/* GBE per-slave-port registers */
struct gbe_port_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 sa_lo;
	u32 sa_hi;
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
	u32 ts_ctl_ltype2;
	u32 ts_ctl2;
};

/* slave-port register offsets */
struct gbe_port_regs_ofs {
	u16 port_vlan;
	u16 tx_pri_map;
	u16 sa_lo;
	u16 sa_hi;
	u16 ts_ctl;
	u16 ts_seq_ltype;
	u16 ts_vlan;
	u16 ts_ctl_ltype2;
	u16 ts_ctl2;
};

/* GBE host (CPU-facing) port registers */
struct gbe_host_port_regs {
	u32 src_id;
	u32 port_vlan;
	u32 rx_pri_map;
	u32 rx_maxlen;
};

/* host-port register offsets
 * NOTE(review): holds tx_pri_map while gbe_host_port_regs has
 * rx_pri_map -- verify which priority map the common code addresses.
 */
struct gbe_host_port_regs_ofs {
	u16 port_vlan;
	u16 tx_pri_map;
	u16 rx_maxlen;
};

/* GBE per-port EMAC (MAC sliver) registers */
struct gbe_emac_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 __reserved_1;
	u32 rx_pri_map;
	u32 rsvd[6];
};

/* EMAC register offsets */
struct gbe_emac_regs_ofs {
	u16 mac_control;
	u16 soft_reset;
	u16 rx_maxlen;
};
353
/* GBE hardware statistics counters; hardware layout, do not reorder */
struct gbe_hw_stats {
	u32 rx_good_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_crc_errors;
	u32 rx_align_code_errors;
	u32 rx_oversized_frames;
	u32 rx_jabber_frames;
	u32 rx_undersized_frames;
	u32 rx_fragments;
	u32 __pad_0[2];
	u32 rx_bytes;
	u32 tx_good_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_deferred_frames;
	u32 tx_collision_frames;
	u32 tx_single_coll_frames;
	u32 tx_mult_coll_frames;
	u32 tx_excessive_collisions;
	u32 tx_late_collisions;
	u32 tx_underrun;
	u32 tx_carrier_sense_errors;
	u32 tx_bytes;
	u32 tx_64byte_frames;
	u32 tx_65_to_127byte_frames;
	u32 tx_128_to_255byte_frames;
	u32 tx_256_to_511byte_frames;
	u32 tx_512_to_1023byte_frames;
	u32 tx_1024byte_frames;
	u32 net_bytes;
	u32 rx_sof_overruns;
	u32 rx_mof_overruns;
	u32 rx_dma_overruns;
};

/* stats module geometry: counters per module and module counts */
#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
#define GBE13_NUM_HW_STATS_MOD			2
#define XGBE10_NUM_HW_STATS_MOD			3
#define GBE_MAX_HW_STAT_MODS			3
#define GBE_HW_STATS_REG_MAP_SZ			0x100
397
/* Per slave-port state */
struct gbe_slave {
	void __iomem *port_regs;	/* mapped switch port registers */
	void __iomem *emac_regs;	/* mapped EMAC sliver registers */
	struct gbe_port_regs_ofs port_regs_ofs;
	struct gbe_emac_regs_ofs emac_regs_ofs;
	int slave_num; /* 0 based logical number */
	int port_num; /* actual port number */
	atomic_t link_state;	/* last combined PHY+SGMII link state */
	bool open;
	struct phy_device *phy;
	u32 link_interface;	/* SGMII/XGMII MAC/PHY link type */
	u32 mac_control;	/* value programmed on link up */
	u8 phy_port_t;	/* ethtool PORT_* connector type */
	struct device_node *phy_node;
	struct list_head slave_list;	/* on gbe_priv->secondary_slaves */
};
414
/* Per-device state for one GBE/XGBE switch subsystem instance */
struct gbe_priv {
	struct device *dev;
	struct netcp_device *netcp_device;
	struct timer_list timer;	/* periodic link/stats poll */
	u32 num_slaves;
	u32 ale_entries;
	u32 ale_ports;
	bool enable_ale;
	struct netcp_tx_pipe tx_pipe;

	int host_port;
	u32 rx_packet_max;
	u32 ss_version;	/* distinguishes GBE 1.4 / XGBE 1.0 paths */

	/* mapped register regions */
	void __iomem *ss_regs;
	void __iomem *switch_regs;
	void __iomem *host_port_regs;
	void __iomem *ale_reg;
	void __iomem *sgmii_port_regs;
	void __iomem *sgmii_port34_regs;	/* second SGMII block (GBE 1.4) */
	void __iomem *xgbe_serdes_regs;
	void __iomem *hw_stats_regs[GBE_MAX_HW_STAT_MODS];

	/* per-version register offsets */
	struct gbe_ss_regs_ofs ss_regs_ofs;
	struct gbe_switch_regs_ofs switch_regs_ofs;
	struct gbe_host_port_regs_ofs host_port_regs_ofs;

	struct cpsw_ale *ale;
	unsigned int tx_queue_id;
	const char *dma_chan_name;

	struct list_head gbe_intf_head;		/* interfaces with a netdev */
	struct list_head secondary_slaves;	/* ports without a netdev */
	struct net_device *dummy_ndev;

	u64 *hw_stats;	/* accumulated 64-bit counters */
	const struct netcp_ethtool_stat *et_stats;
	int num_et_stats;
	/* Lock for updating the hwstats */
	spinlock_t hw_stats_lock;
};
456
/* Per network-interface state, binding a netdev to one slave port */
struct gbe_intf {
	struct net_device *ndev;
	struct device *dev;
	struct gbe_priv *gbe_dev;
	struct netcp_tx_pipe tx_pipe;
	struct gbe_slave *slave;
	struct list_head gbe_intf_list;	/* on gbe_priv->gbe_intf_head */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};
466
/* Forward declarations of the netcp module descriptors */
static struct netcp_module gbe_module;
static struct netcp_module xgbe_module;

/* Statistic management */
struct netcp_ethtool_stat {
	char desc[ETH_GSTRING_LEN];	/* ethtool string name */
	int type;	/* stats module the counter lives in */
	u32 size;	/* size of the counter field */
	int offset;	/* byte offset within the module's stats block */
};
477
/* Build a netcp_ethtool_stat initializer (desc, type, size, offset)
 * for one gbe_hw_stats counter of module A/B/C/D respectively.
 */
#define GBE_STATSA_INFO(field) "GBE_A:"#field, GBE_STATSA_MODULE,\
			FIELD_SIZEOF(struct gbe_hw_stats, field), \
			offsetof(struct gbe_hw_stats, field)

#define GBE_STATSB_INFO(field) "GBE_B:"#field, GBE_STATSB_MODULE,\
			FIELD_SIZEOF(struct gbe_hw_stats, field), \
			offsetof(struct gbe_hw_stats, field)

#define GBE_STATSC_INFO(field) "GBE_C:"#field, GBE_STATSC_MODULE,\
			FIELD_SIZEOF(struct gbe_hw_stats, field), \
			offsetof(struct gbe_hw_stats, field)

#define GBE_STATSD_INFO(field) "GBE_D:"#field, GBE_STATSD_MODULE,\
			FIELD_SIZEOF(struct gbe_hw_stats, field), \
			offsetof(struct gbe_hw_stats, field)
493
/* ethtool statistics table for GBE 1.3/1.4: the same counter set is
 * repeated once per stats module (A-D); the module tag in each entry's
 * type field steers gbe_update_stats*() to the right register bank.
 */
static const struct netcp_ethtool_stat gbe13_et_stats[] = {
	/* GBE module A */
	{GBE_STATSA_INFO(rx_good_frames)},
	{GBE_STATSA_INFO(rx_broadcast_frames)},
	{GBE_STATSA_INFO(rx_multicast_frames)},
	{GBE_STATSA_INFO(rx_pause_frames)},
	{GBE_STATSA_INFO(rx_crc_errors)},
	{GBE_STATSA_INFO(rx_align_code_errors)},
	{GBE_STATSA_INFO(rx_oversized_frames)},
	{GBE_STATSA_INFO(rx_jabber_frames)},
	{GBE_STATSA_INFO(rx_undersized_frames)},
	{GBE_STATSA_INFO(rx_fragments)},
	{GBE_STATSA_INFO(rx_bytes)},
	{GBE_STATSA_INFO(tx_good_frames)},
	{GBE_STATSA_INFO(tx_broadcast_frames)},
	{GBE_STATSA_INFO(tx_multicast_frames)},
	{GBE_STATSA_INFO(tx_pause_frames)},
	{GBE_STATSA_INFO(tx_deferred_frames)},
	{GBE_STATSA_INFO(tx_collision_frames)},
	{GBE_STATSA_INFO(tx_single_coll_frames)},
	{GBE_STATSA_INFO(tx_mult_coll_frames)},
	{GBE_STATSA_INFO(tx_excessive_collisions)},
	{GBE_STATSA_INFO(tx_late_collisions)},
	{GBE_STATSA_INFO(tx_underrun)},
	{GBE_STATSA_INFO(tx_carrier_sense_errors)},
	{GBE_STATSA_INFO(tx_bytes)},
	{GBE_STATSA_INFO(tx_64byte_frames)},
	{GBE_STATSA_INFO(tx_65_to_127byte_frames)},
	{GBE_STATSA_INFO(tx_128_to_255byte_frames)},
	{GBE_STATSA_INFO(tx_256_to_511byte_frames)},
	{GBE_STATSA_INFO(tx_512_to_1023byte_frames)},
	{GBE_STATSA_INFO(tx_1024byte_frames)},
	{GBE_STATSA_INFO(net_bytes)},
	{GBE_STATSA_INFO(rx_sof_overruns)},
	{GBE_STATSA_INFO(rx_mof_overruns)},
	{GBE_STATSA_INFO(rx_dma_overruns)},
	/* GBE module B */
	{GBE_STATSB_INFO(rx_good_frames)},
	{GBE_STATSB_INFO(rx_broadcast_frames)},
	{GBE_STATSB_INFO(rx_multicast_frames)},
	{GBE_STATSB_INFO(rx_pause_frames)},
	{GBE_STATSB_INFO(rx_crc_errors)},
	{GBE_STATSB_INFO(rx_align_code_errors)},
	{GBE_STATSB_INFO(rx_oversized_frames)},
	{GBE_STATSB_INFO(rx_jabber_frames)},
	{GBE_STATSB_INFO(rx_undersized_frames)},
	{GBE_STATSB_INFO(rx_fragments)},
	{GBE_STATSB_INFO(rx_bytes)},
	{GBE_STATSB_INFO(tx_good_frames)},
	{GBE_STATSB_INFO(tx_broadcast_frames)},
	{GBE_STATSB_INFO(tx_multicast_frames)},
	{GBE_STATSB_INFO(tx_pause_frames)},
	{GBE_STATSB_INFO(tx_deferred_frames)},
	{GBE_STATSB_INFO(tx_collision_frames)},
	{GBE_STATSB_INFO(tx_single_coll_frames)},
	{GBE_STATSB_INFO(tx_mult_coll_frames)},
	{GBE_STATSB_INFO(tx_excessive_collisions)},
	{GBE_STATSB_INFO(tx_late_collisions)},
	{GBE_STATSB_INFO(tx_underrun)},
	{GBE_STATSB_INFO(tx_carrier_sense_errors)},
	{GBE_STATSB_INFO(tx_bytes)},
	{GBE_STATSB_INFO(tx_64byte_frames)},
	{GBE_STATSB_INFO(tx_65_to_127byte_frames)},
	{GBE_STATSB_INFO(tx_128_to_255byte_frames)},
	{GBE_STATSB_INFO(tx_256_to_511byte_frames)},
	{GBE_STATSB_INFO(tx_512_to_1023byte_frames)},
	{GBE_STATSB_INFO(tx_1024byte_frames)},
	{GBE_STATSB_INFO(net_bytes)},
	{GBE_STATSB_INFO(rx_sof_overruns)},
	{GBE_STATSB_INFO(rx_mof_overruns)},
	{GBE_STATSB_INFO(rx_dma_overruns)},
	/* GBE module C */
	{GBE_STATSC_INFO(rx_good_frames)},
	{GBE_STATSC_INFO(rx_broadcast_frames)},
	{GBE_STATSC_INFO(rx_multicast_frames)},
	{GBE_STATSC_INFO(rx_pause_frames)},
	{GBE_STATSC_INFO(rx_crc_errors)},
	{GBE_STATSC_INFO(rx_align_code_errors)},
	{GBE_STATSC_INFO(rx_oversized_frames)},
	{GBE_STATSC_INFO(rx_jabber_frames)},
	{GBE_STATSC_INFO(rx_undersized_frames)},
	{GBE_STATSC_INFO(rx_fragments)},
	{GBE_STATSC_INFO(rx_bytes)},
	{GBE_STATSC_INFO(tx_good_frames)},
	{GBE_STATSC_INFO(tx_broadcast_frames)},
	{GBE_STATSC_INFO(tx_multicast_frames)},
	{GBE_STATSC_INFO(tx_pause_frames)},
	{GBE_STATSC_INFO(tx_deferred_frames)},
	{GBE_STATSC_INFO(tx_collision_frames)},
	{GBE_STATSC_INFO(tx_single_coll_frames)},
	{GBE_STATSC_INFO(tx_mult_coll_frames)},
	{GBE_STATSC_INFO(tx_excessive_collisions)},
	{GBE_STATSC_INFO(tx_late_collisions)},
	{GBE_STATSC_INFO(tx_underrun)},
	{GBE_STATSC_INFO(tx_carrier_sense_errors)},
	{GBE_STATSC_INFO(tx_bytes)},
	{GBE_STATSC_INFO(tx_64byte_frames)},
	{GBE_STATSC_INFO(tx_65_to_127byte_frames)},
	{GBE_STATSC_INFO(tx_128_to_255byte_frames)},
	{GBE_STATSC_INFO(tx_256_to_511byte_frames)},
	{GBE_STATSC_INFO(tx_512_to_1023byte_frames)},
	{GBE_STATSC_INFO(tx_1024byte_frames)},
	{GBE_STATSC_INFO(net_bytes)},
	{GBE_STATSC_INFO(rx_sof_overruns)},
	{GBE_STATSC_INFO(rx_mof_overruns)},
	{GBE_STATSC_INFO(rx_dma_overruns)},
	/* GBE module D */
	{GBE_STATSD_INFO(rx_good_frames)},
	{GBE_STATSD_INFO(rx_broadcast_frames)},
	{GBE_STATSD_INFO(rx_multicast_frames)},
	{GBE_STATSD_INFO(rx_pause_frames)},
	{GBE_STATSD_INFO(rx_crc_errors)},
	{GBE_STATSD_INFO(rx_align_code_errors)},
	{GBE_STATSD_INFO(rx_oversized_frames)},
	{GBE_STATSD_INFO(rx_jabber_frames)},
	{GBE_STATSD_INFO(rx_undersized_frames)},
	{GBE_STATSD_INFO(rx_fragments)},
	{GBE_STATSD_INFO(rx_bytes)},
	{GBE_STATSD_INFO(tx_good_frames)},
	{GBE_STATSD_INFO(tx_broadcast_frames)},
	{GBE_STATSD_INFO(tx_multicast_frames)},
	{GBE_STATSD_INFO(tx_pause_frames)},
	{GBE_STATSD_INFO(tx_deferred_frames)},
	{GBE_STATSD_INFO(tx_collision_frames)},
	{GBE_STATSD_INFO(tx_single_coll_frames)},
	{GBE_STATSD_INFO(tx_mult_coll_frames)},
	{GBE_STATSD_INFO(tx_excessive_collisions)},
	{GBE_STATSD_INFO(tx_late_collisions)},
	{GBE_STATSD_INFO(tx_underrun)},
	{GBE_STATSD_INFO(tx_carrier_sense_errors)},
	{GBE_STATSD_INFO(tx_bytes)},
	{GBE_STATSD_INFO(tx_64byte_frames)},
	{GBE_STATSD_INFO(tx_65_to_127byte_frames)},
	{GBE_STATSD_INFO(tx_128_to_255byte_frames)},
	{GBE_STATSD_INFO(tx_256_to_511byte_frames)},
	{GBE_STATSD_INFO(tx_512_to_1023byte_frames)},
	{GBE_STATSD_INFO(tx_1024byte_frames)},
	{GBE_STATSD_INFO(net_bytes)},
	{GBE_STATSD_INFO(rx_sof_overruns)},
	{GBE_STATSD_INFO(rx_mof_overruns)},
	{GBE_STATSD_INFO(rx_dma_overruns)},
};
636
/* Same as GBE_STATS*_INFO but for the three XGBE stats modules,
 * addressing counters within struct xgbe_hw_stats.
 */
#define XGBE_STATS0_INFO(field) "GBE_0:"#field, XGBE_STATS0_MODULE, \
			FIELD_SIZEOF(struct xgbe_hw_stats, field), \
			offsetof(struct xgbe_hw_stats, field)

#define XGBE_STATS1_INFO(field) "GBE_1:"#field, XGBE_STATS1_MODULE, \
			FIELD_SIZEOF(struct xgbe_hw_stats, field), \
			offsetof(struct xgbe_hw_stats, field)

#define XGBE_STATS2_INFO(field) "GBE_2:"#field, XGBE_STATS2_MODULE, \
			FIELD_SIZEOF(struct xgbe_hw_stats, field), \
			offsetof(struct xgbe_hw_stats, field)
648
/* ethtool statistics table for XGBE 1.0: module 0 exposes a reduced
 * counter set; modules 1 and 2 expose the full set.
 */
static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
	/* XGBE module 0 */
	{XGBE_STATS0_INFO(rx_good_frames)},
	{XGBE_STATS0_INFO(rx_broadcast_frames)},
	{XGBE_STATS0_INFO(rx_multicast_frames)},
	{XGBE_STATS0_INFO(rx_oversized_frames)},
	{XGBE_STATS0_INFO(rx_undersized_frames)},
	{XGBE_STATS0_INFO(overrun_type4)},
	{XGBE_STATS0_INFO(overrun_type5)},
	{XGBE_STATS0_INFO(rx_bytes)},
	{XGBE_STATS0_INFO(tx_good_frames)},
	{XGBE_STATS0_INFO(tx_broadcast_frames)},
	{XGBE_STATS0_INFO(tx_multicast_frames)},
	{XGBE_STATS0_INFO(tx_bytes)},
	{XGBE_STATS0_INFO(tx_64byte_frames)},
	{XGBE_STATS0_INFO(tx_65_to_127byte_frames)},
	{XGBE_STATS0_INFO(tx_128_to_255byte_frames)},
	{XGBE_STATS0_INFO(tx_256_to_511byte_frames)},
	{XGBE_STATS0_INFO(tx_512_to_1023byte_frames)},
	{XGBE_STATS0_INFO(tx_1024byte_frames)},
	{XGBE_STATS0_INFO(net_bytes)},
	{XGBE_STATS0_INFO(rx_sof_overruns)},
	{XGBE_STATS0_INFO(rx_mof_overruns)},
	{XGBE_STATS0_INFO(rx_dma_overruns)},
	/* XGBE module 1 */
	{XGBE_STATS1_INFO(rx_good_frames)},
	{XGBE_STATS1_INFO(rx_broadcast_frames)},
	{XGBE_STATS1_INFO(rx_multicast_frames)},
	{XGBE_STATS1_INFO(rx_pause_frames)},
	{XGBE_STATS1_INFO(rx_crc_errors)},
	{XGBE_STATS1_INFO(rx_align_code_errors)},
	{XGBE_STATS1_INFO(rx_oversized_frames)},
	{XGBE_STATS1_INFO(rx_jabber_frames)},
	{XGBE_STATS1_INFO(rx_undersized_frames)},
	{XGBE_STATS1_INFO(rx_fragments)},
	{XGBE_STATS1_INFO(overrun_type4)},
	{XGBE_STATS1_INFO(overrun_type5)},
	{XGBE_STATS1_INFO(rx_bytes)},
	{XGBE_STATS1_INFO(tx_good_frames)},
	{XGBE_STATS1_INFO(tx_broadcast_frames)},
	{XGBE_STATS1_INFO(tx_multicast_frames)},
	{XGBE_STATS1_INFO(tx_pause_frames)},
	{XGBE_STATS1_INFO(tx_deferred_frames)},
	{XGBE_STATS1_INFO(tx_collision_frames)},
	{XGBE_STATS1_INFO(tx_single_coll_frames)},
	{XGBE_STATS1_INFO(tx_mult_coll_frames)},
	{XGBE_STATS1_INFO(tx_excessive_collisions)},
	{XGBE_STATS1_INFO(tx_late_collisions)},
	{XGBE_STATS1_INFO(tx_underrun)},
	{XGBE_STATS1_INFO(tx_carrier_sense_errors)},
	{XGBE_STATS1_INFO(tx_bytes)},
	{XGBE_STATS1_INFO(tx_64byte_frames)},
	{XGBE_STATS1_INFO(tx_65_to_127byte_frames)},
	{XGBE_STATS1_INFO(tx_128_to_255byte_frames)},
	{XGBE_STATS1_INFO(tx_256_to_511byte_frames)},
	{XGBE_STATS1_INFO(tx_512_to_1023byte_frames)},
	{XGBE_STATS1_INFO(tx_1024byte_frames)},
	{XGBE_STATS1_INFO(net_bytes)},
	{XGBE_STATS1_INFO(rx_sof_overruns)},
	{XGBE_STATS1_INFO(rx_mof_overruns)},
	{XGBE_STATS1_INFO(rx_dma_overruns)},
	/* XGBE module 2 */
	{XGBE_STATS2_INFO(rx_good_frames)},
	{XGBE_STATS2_INFO(rx_broadcast_frames)},
	{XGBE_STATS2_INFO(rx_multicast_frames)},
	{XGBE_STATS2_INFO(rx_pause_frames)},
	{XGBE_STATS2_INFO(rx_crc_errors)},
	{XGBE_STATS2_INFO(rx_align_code_errors)},
	{XGBE_STATS2_INFO(rx_oversized_frames)},
	{XGBE_STATS2_INFO(rx_jabber_frames)},
	{XGBE_STATS2_INFO(rx_undersized_frames)},
	{XGBE_STATS2_INFO(rx_fragments)},
	{XGBE_STATS2_INFO(overrun_type4)},
	{XGBE_STATS2_INFO(overrun_type5)},
	{XGBE_STATS2_INFO(rx_bytes)},
	{XGBE_STATS2_INFO(tx_good_frames)},
	{XGBE_STATS2_INFO(tx_broadcast_frames)},
	{XGBE_STATS2_INFO(tx_multicast_frames)},
	{XGBE_STATS2_INFO(tx_pause_frames)},
	{XGBE_STATS2_INFO(tx_deferred_frames)},
	{XGBE_STATS2_INFO(tx_collision_frames)},
	{XGBE_STATS2_INFO(tx_single_coll_frames)},
	{XGBE_STATS2_INFO(tx_mult_coll_frames)},
	{XGBE_STATS2_INFO(tx_excessive_collisions)},
	{XGBE_STATS2_INFO(tx_late_collisions)},
	{XGBE_STATS2_INFO(tx_underrun)},
	{XGBE_STATS2_INFO(tx_carrier_sense_errors)},
	{XGBE_STATS2_INFO(tx_bytes)},
	{XGBE_STATS2_INFO(tx_64byte_frames)},
	{XGBE_STATS2_INFO(tx_65_to_127byte_frames)},
	{XGBE_STATS2_INFO(tx_128_to_255byte_frames)},
	{XGBE_STATS2_INFO(tx_256_to_511byte_frames)},
	{XGBE_STATS2_INFO(tx_512_to_1023byte_frames)},
	{XGBE_STATS2_INFO(tx_1024byte_frames)},
	{XGBE_STATS2_INFO(net_bytes)},
	{XGBE_STATS2_INFO(rx_sof_overruns)},
	{XGBE_STATS2_INFO(rx_mof_overruns)},
	{XGBE_STATS2_INFO(rx_dma_overruns)},
};
748
/* Iterate over all interfaces (slave ports with a netdev) */
#define for_each_intf(i, priv) \
	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)

/* Iterate over secondary slave ports (no netdev of their own) */
#define for_each_sec_slave(slave, priv) \
	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)

/* First secondary slave; only valid when the list is non-empty */
#define first_sec_slave(priv) \
	list_first_entry(&priv->secondary_slaves, \
			struct gbe_slave, slave_list)
758
759static void keystone_get_drvinfo(struct net_device *ndev,
760 struct ethtool_drvinfo *info)
761{
762 strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
763 strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
764}
765
766static u32 keystone_get_msglevel(struct net_device *ndev)
767{
768 struct netcp_intf *netcp = netdev_priv(ndev);
769
770 return netcp->msg_enable;
771}
772
773static void keystone_set_msglevel(struct net_device *ndev, u32 value)
774{
775 struct netcp_intf *netcp = netdev_priv(ndev);
776
777 netcp->msg_enable = value;
778}
779
780static void keystone_get_stat_strings(struct net_device *ndev,
781 uint32_t stringset, uint8_t *data)
782{
783 struct netcp_intf *netcp = netdev_priv(ndev);
784 struct gbe_intf *gbe_intf;
785 struct gbe_priv *gbe_dev;
786 int i;
787
788 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
789 if (!gbe_intf)
790 return;
791 gbe_dev = gbe_intf->gbe_dev;
792
793 switch (stringset) {
794 case ETH_SS_STATS:
795 for (i = 0; i < gbe_dev->num_et_stats; i++) {
796 memcpy(data, gbe_dev->et_stats[i].desc,
797 ETH_GSTRING_LEN);
798 data += ETH_GSTRING_LEN;
799 }
800 break;
801 case ETH_SS_TEST:
802 break;
803 }
804}
805
806static int keystone_get_sset_count(struct net_device *ndev, int stringset)
807{
808 struct netcp_intf *netcp = netdev_priv(ndev);
809 struct gbe_intf *gbe_intf;
810 struct gbe_priv *gbe_dev;
811
812 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
813 if (!gbe_intf)
814 return -EINVAL;
815 gbe_dev = gbe_intf->gbe_dev;
816
817 switch (stringset) {
818 case ETH_SS_TEST:
819 return 0;
820 case ETH_SS_STATS:
821 return gbe_dev->num_et_stats;
822 default:
823 return -EINVAL;
824 }
825}
826
/* Accumulate hardware statistics counters into the 64-bit software
 * totals; if @data is non-NULL also copy the totals out (one u64 per
 * ethtool stat). Caller must hold hw_stats_lock.
 */
static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
{
	void __iomem *base = NULL;
	u32 __iomem *p;
	u32 tmp = 0;
	int i;

	for (i = 0; i < gbe_dev->num_et_stats; i++) {
		/* et_stats[i].type indexes the stats module register bank */
		base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
		p = base + gbe_dev->et_stats[i].offset;
		tmp = readl(p);
		gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
		if (data)
			data[i] = gbe_dev->hw_stats[i];
		/* write-to-decrement:
		 * new register value = old register value - write value
		 */
		writel(tmp, p);
	}
}
847
/* Accumulate hardware statistics on GBE v1.4, where the A/B and C/D
 * module pairs share the same two register banks: the GBE_STATS_CD_SEL
 * bit in STAT_PORT_EN selects which pair is visible, so the table is
 * walked in two halves — first with the bit cleared (A/B), then set
 * (C/D). Caller must hold hw_stats_lock.
 */
static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
{
	void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
	void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
	u64 *hw_stats = &gbe_dev->hw_stats[0];
	void __iomem *base = NULL;
	u32 __iomem *p;
	u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
	int i, j, pair;

	for (pair = 0; pair < 2; pair++) {
		val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));

		if (pair == 0)
			val &= ~GBE_STATS_CD_SEL;
		else
			val |= GBE_STATS_CD_SEL;

		/* make the stat modules visible */
		writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));

		for (i = 0; i < pair_size; i++) {
			j = pair * pair_size + i;
			/* A and C land in bank 0, B and D in bank 1 */
			switch (gbe_dev->et_stats[j].type) {
			case GBE_STATSA_MODULE:
			case GBE_STATSC_MODULE:
				base = gbe_statsa;
				break;
			case GBE_STATSB_MODULE:
			case GBE_STATSD_MODULE:
				base = gbe_statsb;
				break;
			}

			p = base + gbe_dev->et_stats[j].offset;
			tmp = readl(p);
			hw_stats[j] += tmp;
			if (data)
				data[j] = hw_stats[j];
			/* write-to-decrement:
			 * new register value = old register value - write value
			 */
			writel(tmp, p);
		}
	}
}
894
895static void keystone_get_ethtool_stats(struct net_device *ndev,
896 struct ethtool_stats *stats,
897 uint64_t *data)
898{
899 struct netcp_intf *netcp = netdev_priv(ndev);
900 struct gbe_intf *gbe_intf;
901 struct gbe_priv *gbe_dev;
902
903 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
904 if (!gbe_intf)
905 return;
906
907 gbe_dev = gbe_intf->gbe_dev;
908 spin_lock_bh(&gbe_dev->hw_stats_lock);
909 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
910 gbe_update_stats_ver14(gbe_dev, data);
911 else
912 gbe_update_stats(gbe_dev, data);
913 spin_unlock_bh(&gbe_dev->hw_stats_lock);
914}
915
916static int keystone_get_settings(struct net_device *ndev,
917 struct ethtool_cmd *cmd)
918{
919 struct netcp_intf *netcp = netdev_priv(ndev);
920 struct phy_device *phy = ndev->phydev;
921 struct gbe_intf *gbe_intf;
922 int ret;
923
924 if (!phy)
925 return -EINVAL;
926
927 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
928 if (!gbe_intf)
929 return -EINVAL;
930
931 if (!gbe_intf->slave)
932 return -EINVAL;
933
934 ret = phy_ethtool_gset(phy, cmd);
935 if (!ret)
936 cmd->port = gbe_intf->slave->phy_port_t;
937
938 return ret;
939}
940
941static int keystone_set_settings(struct net_device *ndev,
942 struct ethtool_cmd *cmd)
943{
944 struct netcp_intf *netcp = netdev_priv(ndev);
945 struct phy_device *phy = ndev->phydev;
946 struct gbe_intf *gbe_intf;
947 u32 features = cmd->advertising & cmd->supported;
948
949 if (!phy)
950 return -EINVAL;
951
952 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
953 if (!gbe_intf)
954 return -EINVAL;
955
956 if (!gbe_intf->slave)
957 return -EINVAL;
958
959 if (cmd->port != gbe_intf->slave->phy_port_t) {
960 if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
961 return -EINVAL;
962
963 if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
964 return -EINVAL;
965
966 if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
967 return -EINVAL;
968
969 if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
970 return -EINVAL;
971
972 if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
973 return -EINVAL;
974 }
975
976 gbe_intf->slave->phy_port_t = cmd->port;
977 return phy_ethtool_sset(phy, cmd);
978}
979
/* ethtool operations shared by all keystone netcp interfaces */
static const struct ethtool_ops keystone_ethtool_ops = {
	.get_drvinfo		= keystone_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= keystone_get_msglevel,
	.set_msglevel		= keystone_set_msglevel,
	.get_strings		= keystone_get_stat_strings,
	.get_sset_count		= keystone_get_sset_count,
	.get_ethtool_stats	= keystone_get_ethtool_stats,
	.get_settings		= keystone_get_settings,
	.set_settings		= keystone_set_settings,
};
991
/* Pack a 6-byte MAC address into the SA_HI/SA_LO register values:
 * bytes 0-3 (LSB first) into mac_hi, bytes 4-5 into mac_lo.
 */
#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
995
/* Program the interface's MAC address into the slave port's SA_HI/SA_LO
 * registers.
 */
static void gbe_set_slave_mac(struct gbe_slave *slave,
			      struct gbe_intf *gbe_intf)
{
	struct net_device *ndev = gbe_intf->ndev;

	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
}
1004
1005static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
1006{
1007 if (priv->host_port == 0)
1008 return slave_num + 1;
1009
1010 return slave_num;
1011}
1012
/* Apply a link up/down transition: program the EMAC mac_control,
 * set the ALE port forwarding state and update the netdev carrier.
 */
static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
					  struct net_device *ndev,
					  struct gbe_slave *slave,
					  int up)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;

	if (up) {
		mac_control = slave->mac_control;
		/* pick (X)GIG mode from the PHY's reported speed */
		if (phy && (phy->speed == SPEED_1000)) {
			mac_control |= MACSL_GIG_MODE;
			mac_control &= ~MACSL_XGIG_MODE;
		} else if (phy && (phy->speed == SPEED_10000)) {
			mac_control |= MACSL_XGIG_MODE;
			mac_control &= ~MACSL_GIG_MODE;
		}

		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));

		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_FORWARD);

		if (ndev && slave->open)
			netif_carrier_on(ndev);
	} else {
		/* mac_control is still 0 here: link down clears the MAC */
		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
						 mac_control));
		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
				     ALE_PORT_STATE,
				     ALE_PORT_STATE_DISABLE);
		if (ndev)
			netif_carrier_off(ndev);
	}

	if (phy)
		phy_print_status(phy);
}
1053
1054static bool gbe_phy_link_status(struct gbe_slave *slave)
1055{
1056 return !slave->phy || slave->phy->link;
1057}
1058
/* Re-evaluate a slave's combined PHY+SGMII link state and, only on a
 * transition, apply it via netcp_ethss_link_state_action().
 */
static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
					  struct gbe_slave *slave,
					  struct net_device *ndev)
{
	int sp = slave->slave_num;
	int phy_link_state, sgmii_link_state = 1, link_state;

	if (!slave->open)
		return;

	/* XGMII links have no SGMII status; leave it at 1 (up) */
	if (!SLAVE_LINK_IS_XGMII(slave))
		sgmii_link_state = netcp_sgmii_get_port_link(SGMII_BASE(sp),
							     sp);
	phy_link_state = gbe_phy_link_status(slave);
	/* both flags are 0/1, so bitwise AND acts as logical AND */
	link_state = phy_link_state & sgmii_link_state;

	/* atomic_xchg returns the previous state: act only on change */
	if (atomic_xchg(&slave->link_state, link_state) != link_state)
		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
					      link_state);
}
1079
1080static void xgbe_adjust_link(struct net_device *ndev)
1081{
1082 struct netcp_intf *netcp = netdev_priv(ndev);
1083 struct gbe_intf *gbe_intf;
1084
1085 gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1086 if (!gbe_intf)
1087 return;
1088
1089 netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
1090 ndev);
1091}
1092
1093static void gbe_adjust_link(struct net_device *ndev)
1094{
1095 struct netcp_intf *netcp = netdev_priv(ndev);
1096 struct gbe_intf *gbe_intf;
1097
1098 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1099 if (!gbe_intf)
1100 return;
1101
1102 netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
1103 ndev);
1104}
1105
/* PHY adjust_link callback shared by all secondary (netdev-less)
 * slaves. NOTE(review): ndev's priv is treated as gbe_priv directly,
 * presumably the dummy netdev — confirm against registration code.
 */
static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
{
	struct gbe_priv *gbe_dev = netdev_priv(ndev);
	struct gbe_slave *slave;

	for_each_sec_slave(slave, gbe_dev)
		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
}
1114
/* Reset EMAC
 * Soft reset is set and polled until clear, or until a timeout occurs.
 * Returns 0 on success, GMACSL_RET_WARN_RESET_INCOMPLETE on timeout.
 */
static int gbe_port_reset(struct gbe_slave *slave)
{
	u32 i, v;

	/* Set the soft reset bit */
	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));

	/* Wait for the bit to clear (tight poll, no delay between reads) */
	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
			return 0;
	}

	/* Timeout on the reset */
	return GMACSL_RET_WARN_RESET_INCOMPLETE;
}
1135
/* Configure EMAC: clamp and program the RX max frame length, enable
 * XGMII mode at subsystem level where applicable, and restore the
 * slave's mac_control value.
 */
static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
			    int max_rx_len)
{
	u32 xgmii_mode;

	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
		max_rx_len = NETCP_MAX_FRAME_SIZE;

	/* Enable correct MII mode at SS level */
	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
		/* one enable bit per slave in the SS control register */
		xgmii_mode |= (1 << slave->slave_num);
		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
	}

	writel(max_rx_len, GBE_REG_ADDR(slave, emac_regs, rx_maxlen));
	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
}
1156
/* Quiesce a slave port: reset its EMAC, stop ALE forwarding, drop the
 * broadcast multicast entry and detach the PHY (if any).
 */
static void gbe_slave_stop(struct gbe_intf *intf)
{
	struct gbe_priv *gbe_dev = intf->gbe_dev;
	struct gbe_slave *slave = intf->slave;

	gbe_port_reset(slave);
	/* Disable forwarding */
	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
			   1 << slave->port_num, 0, 0);

	if (!slave->phy)
		return;

	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
}
1176
1177static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
1178{
1179 void __iomem *sgmii_port_regs;
1180
1181 sgmii_port_regs = priv->sgmii_port_regs;
1182 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
1183 sgmii_port_regs = priv->sgmii_port34_regs;
1184
1185 if (!SLAVE_LINK_IS_XGMII(slave)) {
1186 netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
1187 netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
1188 slave->link_interface);
1189 }
1190}
1191
/* Bring up a slave port: SGMII config, EMAC reset/config, MAC address,
 * ALE forwarding, and — for MAC_PHY link types — connect and start the
 * PHY. Returns 0 on success or -ENODEV when the PHY cannot be found.
 */
static int gbe_slave_open(struct gbe_intf *gbe_intf)
{
	struct gbe_priv *priv = gbe_intf->gbe_dev;
	struct gbe_slave *slave = gbe_intf->slave;
	phy_interface_t phy_mode;
	bool has_phy = false;

	void (*hndlr)(struct net_device *) = gbe_adjust_link;

	gbe_sgmii_config(priv, slave);
	gbe_port_reset(slave);
	gbe_port_config(priv, slave, priv->rx_packet_max);
	gbe_set_slave_mac(slave, gbe_intf);
	/* enable forwarding */
	cpsw_ale_control_set(priv->ale, slave->port_num,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);

	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = PHY_INTERFACE_MODE_SGMII;
		slave->phy_port_t = PORT_MII;
	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
		has_phy = true;
		phy_mode = PHY_INTERFACE_MODE_NA;
		slave->phy_port_t = PORT_FIBRE;
	}

	if (has_phy) {
		/* XGBE uses its own adjust_link callback */
		if (priv->ss_version == XGBE_SS_VERSION_10)
			hndlr = xgbe_adjust_link;

		slave->phy = of_phy_connect(gbe_intf->ndev,
					    slave->phy_node,
					    hndlr, 0,
					    phy_mode);
		if (!slave->phy) {
			dev_err(priv->dev, "phy not found on slave %d\n",
				slave->slave_num);
			return -ENODEV;
		}
		/* NOTE(review): "0x%s" prefixes a device *name* with 0x —
		 * looks like a leftover from printing a numeric id
		 */
		dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
			dev_name(&slave->phy->dev));
		phy_start(slave->phy);
		phy_read_status(slave->phy);
	}
	return 0;
}
1241
/* Initialize the host (CPU-facing) port: program the max frame length,
 * start the ALE and set its global/port controls. The ALE runs in
 * bypass mode unless enable_ale was requested.
 */
static void gbe_init_host_port(struct gbe_priv *priv)
{
	int bypass_en = 1;
	/* Max length register */
	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
						  rx_maxlen));

	cpsw_ale_start(priv->ale);

	if (priv->enable_ale)
		bypass_en = 0;

	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);

	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);

	cpsw_ale_control_set(priv->ale, priv->host_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
			     GBE_PORT_MASK(priv->ale_ports));

	/* unknown multicast is not flooded back to the host port */
	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports - 1));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
			     GBE_PORT_MASK(priv->ale_ports));

	cpsw_ale_control_set(priv->ale, 0,
			     ALE_PORT_UNTAGGED_EGRESS,
			     GBE_PORT_MASK(priv->ale_ports));
}
1277
/* Add a multicast ALE entry for @addr on all ports, both untagged and
 * for every VLAN currently active on the interface.
 */
static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_add_mcast(gbe_dev->ale, addr,
			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
			   ALE_MCAST_FWD_2);
	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_add_mcast(gbe_dev->ale, addr,
				   GBE_PORT_MASK(gbe_dev->ale_ports),
				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
	}
}
1292
/* Add a unicast ALE entry for @addr directed at the host port, both
 * untagged and for every active VLAN.
 */
static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
				   ALE_VLAN, vlan_id);
}
1304
/* Remove the multicast ALE entries for @addr (untagged and for every
 * active VLAN); mirror of gbe_add_mcast_addr().
 */
static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
	}
}
1316
/* Remove the unicast ALE entries for @addr (untagged and for every
 * active VLAN); mirror of gbe_add_ucast_addr().
 */
static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
{
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	u16 vlan_id;

	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);

	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
				   ALE_VLAN, vlan_id);
	}
}
1329
1330static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
1331{
1332 struct gbe_intf *gbe_intf = intf_priv;
1333 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1334
1335 dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
1336 naddr->addr, naddr->type);
1337
1338 switch (naddr->type) {
1339 case ADDR_MCAST:
1340 case ADDR_BCAST:
1341 gbe_add_mcast_addr(gbe_intf, naddr->addr);
1342 break;
1343 case ADDR_UCAST:
1344 case ADDR_DEV:
1345 gbe_add_ucast_addr(gbe_intf, naddr->addr);
1346 break;
1347 case ADDR_ANY:
1348 /* nothing to do for promiscuous */
1349 default:
1350 break;
1351 }
1352
1353 return 0;
1354}
1355
1356static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
1357{
1358 struct gbe_intf *gbe_intf = intf_priv;
1359 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1360
1361 dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
1362 naddr->addr, naddr->type);
1363
1364 switch (naddr->type) {
1365 case ADDR_MCAST:
1366 case ADDR_BCAST:
1367 gbe_del_mcast_addr(gbe_intf, naddr->addr);
1368 break;
1369 case ADDR_UCAST:
1370 case ADDR_DEV:
1371 gbe_del_ucast_addr(gbe_intf, naddr->addr);
1372 break;
1373 case ADDR_ANY:
1374 /* nothing to do for promiscuous */
1375 default:
1376 break;
1377 }
1378
1379 return 0;
1380}
1381
/* netcp add-VLAN hook: record the VID and create the ALE VLAN entry
 * with all ports as members; unregistered multicast is not flooded to
 * the host port (ale_ports - 1 mask). Always returns 0.
 */
static int gbe_add_vid(void *intf_priv, int vid)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;

	set_bit(vid, gbe_intf->active_vlans);

	cpsw_ale_add_vlan(gbe_dev->ale, vid,
			  GBE_PORT_MASK(gbe_dev->ale_ports),
			  GBE_MASK_NO_PORTS,
			  GBE_PORT_MASK(gbe_dev->ale_ports),
			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));

	return 0;
}
1397
1398static int gbe_del_vid(void *intf_priv, int vid)
1399{
1400 struct gbe_intf *gbe_intf = intf_priv;
1401 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
1402
1403 cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
1404 clear_bit(vid, gbe_intf->active_vlans);
1405 return 0;
1406}
1407
1408static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
1409{
1410 struct gbe_intf *gbe_intf = intf_priv;
1411 struct phy_device *phy = gbe_intf->slave->phy;
1412 int ret = -EOPNOTSUPP;
1413
1414 if (phy)
1415 ret = phy_mii_ioctl(phy, req, cmd);
1416
1417 return ret;
1418}
1419
1420static void netcp_ethss_timer(unsigned long arg)
1421{
1422 struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
1423 struct gbe_intf *gbe_intf;
1424 struct gbe_slave *slave;
1425
1426 /* Check & update SGMII link state of interfaces */
1427 for_each_intf(gbe_intf, gbe_dev) {
1428 if (!gbe_intf->slave->open)
1429 continue;
1430 netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
1431 gbe_intf->ndev);
1432 }
1433
1434 /* Check & update SGMII link state of secondary ports */
1435 for_each_sec_slave(slave, gbe_dev) {
1436 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
1437 }
1438
1439 spin_lock_bh(&gbe_dev->hw_stats_lock);
1440
1441 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1442 gbe_update_stats_ver14(gbe_dev, NULL);
1443 else
1444 gbe_update_stats(gbe_dev, NULL);
1445
1446 spin_unlock_bh(&gbe_dev->hw_stats_lock);
1447
1448 gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
1449 add_timer(&gbe_dev->timer);
1450}
1451
/* TX hook registered with the netcp core in gbe_open(): steers every
 * outgoing packet on this interface into the interface's TX pipe.
 * Always returns 0 (hook chain continues).
 */
static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
{
	struct gbe_intf *gbe_intf = data;

	p_info->tx_pipe = &gbe_intf->tx_pipe;
	return 0;
}
1459
/* netdev open path for a GBE/XGBE interface: configures the TX pipe
 * PS flags, resets the slave port, programs global switch control and
 * statistics registers, brings the slave up, and registers the TX hook.
 * Returns 0 on success or the error from gbe_slave_open(), with the
 * slave stopped again on failure.
 */
static int gbe_open(void *intf_priv, struct net_device *ndev)
{
	struct gbe_intf *gbe_intf = intf_priv;
	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct gbe_slave *slave = gbe_intf->slave;
	int port_num = slave->port_num;
	u32 reg;
	int ret;

	/* Log the hardware identification/version register */
	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
		GBE_RTL_VERSION(reg), GBE_IDENT(reg));

	/* In ALE-bypass mode the PS flags carry the egress port number;
	 * with the ALE enabled the switch decides, so leave them 0.
	 */
	if (gbe_dev->enable_ale)
		gbe_intf->tx_pipe.dma_psflags = 0;
	else
		gbe_intf->tx_pipe.dma_psflags = port_num;

	dev_dbg(gbe_dev->dev, "opened TX channel %s: %p with psflags %d\n",
		gbe_intf->tx_pipe.dma_chan_name,
		gbe_intf->tx_pipe.dma_channel,
		gbe_intf->tx_pipe.dma_psflags);

	/* Start from a quiesced slave before touching global switch regs */
	gbe_slave_stop(gbe_intf);

	/* disable priority elevation and enable statistics on all ports */
	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));

	/* Control register */
	writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));

	/* All statistics enabled and STAT AB visible by default */
	writel(GBE_REG_VAL_STAT_ENABLE_ALL, GBE_REG_ADDR(gbe_dev, switch_regs,
							 stat_port_en));

	ret = gbe_slave_open(gbe_intf);
	if (ret)
		goto fail;

	/* From here on, TX traffic for this netdev flows into our pipe */
	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
			      gbe_intf);

	slave->open = true;
	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
	return 0;

fail:
	gbe_slave_stop(gbe_intf);
	return ret;
}
1512
1513static int gbe_close(void *intf_priv, struct net_device *ndev)
1514{
1515 struct gbe_intf *gbe_intf = intf_priv;
1516 struct netcp_intf *netcp = netdev_priv(ndev);
1517
1518 gbe_slave_stop(gbe_intf);
1519 netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
1520 gbe_intf);
1521
1522 gbe_intf->slave->open = false;
1523 atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
1524 return 0;
1525}
1526
1527static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
1528 struct device_node *node)
1529{
1530 int port_reg_num;
1531 u32 port_reg_ofs, emac_reg_ofs;
1532
1533 if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
1534 dev_err(gbe_dev->dev, "missing slave-port parameter\n");
1535 return -EINVAL;
1536 }
1537
1538 if (of_property_read_u32(node, "link-interface",
1539 &slave->link_interface)) {
1540 dev_warn(gbe_dev->dev,
1541 "missing link-interface value defaulting to 1G mac-phy link\n");
1542 slave->link_interface = SGMII_LINK_MAC_PHY;
1543 }
1544
1545 slave->open = false;
1546 slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
1547 slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
1548
1549 if (slave->link_interface >= XGMII_LINK_MAC_PHY)
1550 slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
1551 else
1552 slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
1553
1554 /* Emac regs memmap are contiguous but port regs are not */
1555 port_reg_num = slave->slave_num;
1556 if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
1557 if (slave->slave_num > 1) {
1558 port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
1559 port_reg_num -= 2;
1560 } else {
1561 port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
1562 }
1563 } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
1564 port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
1565 } else {
1566 dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
1567 gbe_dev->ss_version);
1568 return -EINVAL;
1569 }
1570
1571 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1572 emac_reg_ofs = GBE13_EMAC_OFFSET;
1573 else if (gbe_dev->ss_version == XGBE_SS_VERSION_10)
1574 emac_reg_ofs = XGBE10_EMAC_OFFSET;
1575
1576 slave->port_regs = gbe_dev->ss_regs + port_reg_ofs +
1577 (0x30 * port_reg_num);
1578 slave->emac_regs = gbe_dev->ss_regs + emac_reg_ofs +
1579 (0x40 * slave->slave_num);
1580
1581 if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
1582 /* Initialize slave port register offsets */
1583 GBE_SET_REG_OFS(slave, port_regs, port_vlan);
1584 GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
1585 GBE_SET_REG_OFS(slave, port_regs, sa_lo);
1586 GBE_SET_REG_OFS(slave, port_regs, sa_hi);
1587 GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
1588 GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
1589 GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
1590 GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
1591 GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
1592
1593 /* Initialize EMAC register offsets */
1594 GBE_SET_REG_OFS(slave, emac_regs, mac_control);
1595 GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
1596 GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
1597
1598 } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
1599 /* Initialize slave port register offsets */
1600 XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
1601 XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
1602 XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
1603 XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
1604 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
1605 XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
1606 XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
1607 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
1608 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
1609
1610 /* Initialize EMAC register offsets */
1611 XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
1612 XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
1613 XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
1614 }
1615
1616 atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
1617 return 0;
1618}
1619
1620static void init_secondary_ports(struct gbe_priv *gbe_dev,
1621 struct device_node *node)
1622{
1623 struct device *dev = gbe_dev->dev;
1624 phy_interface_t phy_mode;
1625 struct gbe_priv **priv;
1626 struct device_node *port;
1627 struct gbe_slave *slave;
1628 bool mac_phy_link = false;
1629
1630 for_each_child_of_node(node, port) {
1631 slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
1632 if (!slave) {
1633 dev_err(dev,
1634 "memomry alloc failed for secondary port(%s), skipping...\n",
1635 port->name);
1636 continue;
1637 }
1638
1639 if (init_slave(gbe_dev, slave, port)) {
1640 dev_err(dev,
1641 "Failed to initialize secondary port(%s), skipping...\n",
1642 port->name);
1643 devm_kfree(dev, slave);
1644 continue;
1645 }
1646
1647 gbe_sgmii_config(gbe_dev, slave);
1648 gbe_port_reset(slave);
1649 gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
1650 list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
1651 gbe_dev->num_slaves++;
1652 if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
1653 (slave->link_interface == XGMII_LINK_MAC_PHY))
1654 mac_phy_link = true;
1655
1656 slave->open = true;
1657 }
1658
1659 /* of_phy_connect() is needed only for MAC-PHY interface */
1660 if (!mac_phy_link)
1661 return;
1662
1663 /* Allocate dummy netdev device for attaching to phy device */
1664 gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
1665 NET_NAME_UNKNOWN, ether_setup);
1666 if (!gbe_dev->dummy_ndev) {
1667 dev_err(dev,
1668 "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
1669 return;
1670 }
1671 priv = netdev_priv(gbe_dev->dummy_ndev);
1672 *priv = gbe_dev;
1673
1674 if (slave->link_interface == SGMII_LINK_MAC_PHY) {
1675 phy_mode = PHY_INTERFACE_MODE_SGMII;
1676 slave->phy_port_t = PORT_MII;
1677 } else {
1678 phy_mode = PHY_INTERFACE_MODE_NA;
1679 slave->phy_port_t = PORT_FIBRE;
1680 }
1681
1682 for_each_sec_slave(slave, gbe_dev) {
1683 if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
1684 (slave->link_interface != XGMII_LINK_MAC_PHY))
1685 continue;
1686 slave->phy =
1687 of_phy_connect(gbe_dev->dummy_ndev,
1688 slave->phy_node,
1689 gbe_adjust_link_sec_slaves,
1690 0, phy_mode);
1691 if (!slave->phy) {
1692 dev_err(dev, "phy not found for slave %d\n",
1693 slave->slave_num);
1694 slave->phy = NULL;
1695 } else {
1696 dev_dbg(dev, "phy found: id is: 0x%s\n",
1697 dev_name(&slave->phy->dev));
1698 phy_start(slave->phy);
1699 phy_read_status(slave->phy);
1700 }
1701 }
1702}
1703
1704static void free_secondary_ports(struct gbe_priv *gbe_dev)
1705{
1706 struct gbe_slave *slave;
1707
1708 for (;;) {
1709 slave = first_sec_slave(gbe_dev);
1710 if (!slave)
1711 break;
1712 if (slave->phy)
1713 phy_disconnect(slave->phy);
1714 list_del(&slave->slave_list);
1715 }
1716 if (gbe_dev->dummy_ndev)
1717 free_netdev(gbe_dev->dummy_ndev);
1718}
1719
/* Map and populate the 10G (XGBE v1.0) subsystem resources into
 * @gbe_dev: subsystem and serdes register windows, hw-stats storage,
 * module sub-block pointers, ALE parameters, ethtool stats tables and
 * per-module register offsets.
 * Returns 0 on success or a negative errno from OF/ioremap/allocation
 * failures (all resources are devm-managed).
 */
static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
				 struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret, i;

	/* Index 0: the main XGBE subsystem register window */
	ret = of_address_to_resource(node, 0, &res);
	if (ret) {
		dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe subsystem regs\n",
			node->name);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;

	/* Separate window for the XGBE serdes configuration registers */
	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
	if (ret) {
		dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe serdes regs\n",
			node->name);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->xgbe_serdes_regs = regs;

	/* One u64 counter block per slave plus one for the host port */
	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 XGBE10_NUM_STAT_ENTRIES *
					 (XGBE10_NUM_SLAVES + 1) * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	/* Sub-module windows are fixed offsets within the ss window */
	gbe_dev->ss_version = XGBE_SS_VERSION_10;
	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
		XGBE10_SGMII_MODULE_OFFSET;
	gbe_dev->switch_regs = gbe_dev->ss_regs + XGBE10_SWITCH_MODULE_OFFSET;
	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;

	for (i = 0; i < XGBE10_NUM_HW_STATS_MOD; i++)
		gbe_dev->hw_stats_regs[i] = gbe_dev->ss_regs +
			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = gbe_dev->ss_regs + XGBE10_ALE_OFFSET;
	gbe_dev->ale_ports = XGBE10_NUM_ALE_PORTS;
	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
	gbe_dev->et_stats = xgbe10_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);

	/* Subsystem registers */
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);

	/* Switch module registers */
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}
1798
/* Map the GBE subsystem register window from DT index 0 and read the
 * subsystem version from the register at offset 0 into
 * gbe_dev->ss_version (used later to select GBE13 vs XGBE10 layouts).
 * Returns 0 on success or a negative errno.
 */
static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
				    struct device_node *node)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, 0, &res);
	if (ret) {
		dev_err(gbe_dev->dev, "Can't translate of node(%s) address\n",
			node->name);
		return ret;
	}

	regs = devm_ioremap_resource(gbe_dev->dev, &res);
	if (IS_ERR(regs)) {
		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
		return PTR_ERR(regs);
	}
	gbe_dev->ss_regs = regs;
	/* id_ver register sits at offset 0 of the subsystem window */
	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
	return 0;
}
1822
/* Populate the 1G (GBE v1.4 / GBE13) subsystem layout into @gbe_dev.
 * Assumes ss_regs was already mapped by get_gbe_resource_version();
 * allocates hw-stats storage and fills in sub-module pointers, ALE
 * parameters, ethtool stats tables and register offsets.
 * Returns 0 on success or -ENOMEM.
 */
static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
				struct device_node *node)
{
	void __iomem *regs;
	int i;

	/* One u64 counter block per slave */
	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
					 GBE13_NUM_HW_STAT_ENTRIES *
					 GBE13_NUM_SLAVES * sizeof(u64),
					 GFP_KERNEL);
	if (!gbe_dev->hw_stats) {
		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
		return -ENOMEM;
	}

	/* Sub-module windows are fixed offsets within the ss window;
	 * GBE13 has a separate SGMII block for ports 3/4.
	 */
	regs = gbe_dev->ss_regs;
	gbe_dev->sgmii_port_regs = regs + GBE13_SGMII_MODULE_OFFSET;
	gbe_dev->sgmii_port34_regs = regs + GBE13_SGMII34_MODULE_OFFSET;
	gbe_dev->switch_regs = regs + GBE13_SWITCH_MODULE_OFFSET;
	gbe_dev->host_port_regs = regs + GBE13_HOST_PORT_OFFSET;

	for (i = 0; i < GBE13_NUM_HW_STATS_MOD; i++)
		gbe_dev->hw_stats_regs[i] = regs + GBE13_HW_STATS_OFFSET +
			(GBE_HW_STATS_REG_MAP_SZ * i);

	gbe_dev->ale_reg = regs + GBE13_ALE_OFFSET;
	gbe_dev->ale_ports = GBE13_NUM_ALE_PORTS;
	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
	gbe_dev->et_stats = gbe13_et_stats;
	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);

	/* Subsystem registers */
	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);

	/* Switch module registers */
	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);

	/* Host port registers */
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
	return 0;
}
1871
/* netcp module probe: parses the gbe/xgbe DT node, maps registers per
 * subsystem flavor, opens the TX pipe, counts configured interfaces,
 * initializes secondary slave ports, creates the ALE engine, sets up
 * the host port and starts the periodic link/stats timer.
 * On success *inst_priv is set to the new gbe_priv; on failure all
 * devm resources acquired so far are released explicitly.
 */
static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
		     struct device_node *node, void **inst_priv)
{
	struct device_node *interfaces, *interface;
	struct device_node *secondary_ports;
	struct cpsw_ale_params ale_params;
	struct gbe_priv *gbe_dev;
	u32 slave_num;
	int ret = 0;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
	if (!gbe_dev)
		return -ENOMEM;

	gbe_dev->dev = dev;
	gbe_dev->netcp_device = netcp_device;
	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;

	/* init the hw stats lock */
	spin_lock_init(&gbe_dev->hw_stats_lock);

	if (of_find_property(node, "enable-ale", NULL)) {
		gbe_dev->enable_ale = true;
		dev_info(dev, "ALE enabled\n");
	} else {
		gbe_dev->enable_ale = false;
		dev_dbg(dev, "ALE bypass enabled*\n");
	}

	/* A missing tx-queue is logged but tolerated: fall back to the
	 * default queue id.
	 */
	ret = of_property_read_u32(node, "tx-queue",
				   &gbe_dev->tx_queue_id);
	if (ret < 0) {
		dev_err(dev, "missing tx_queue parameter\n");
		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
	}

	/* tx-channel, however, is mandatory */
	ret = of_property_read_string(node, "tx-channel",
				      &gbe_dev->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	/* The node name selects the subsystem flavor: "gbe" (1G, version
	 * read from hardware) or "xgbe" (10G, needs serdes init too).
	 */
	if (!strcmp(node->name, "gbe")) {
		ret = get_gbe_resource_version(gbe_dev, node);
		if (ret)
			goto quit;

		ret = set_gbe_ethss14_priv(gbe_dev, node);
		if (ret)
			goto quit;
	} else if (!strcmp(node->name, "xgbe")) {
		ret = set_xgbe_ethss10_priv(gbe_dev, node);
		if (ret)
			goto quit;
		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
					     gbe_dev->ss_regs);
		if (ret)
			goto quit;
	} else {
		dev_err(dev, "unknown GBE node(%s)\n", node->name);
		ret = -ENODEV;
		goto quit;
	}

	/* NOTE(review): on the success path the "interfaces" refcount is
	 * never dropped (of_node_put() only happens under quit:) —
	 * presumably a leak; confirm against of_get_child_by_name()
	 * semantics.
	 */
	interfaces = of_get_child_by_name(node, "interfaces");
	if (!interfaces)
		dev_err(dev, "could not find interfaces\n");

	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
	if (ret)
		goto quit;

	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
	if (ret)
		goto quit;

	/* Create network interfaces */
	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
	for_each_child_of_node(interfaces, interface) {
		ret = of_property_read_u32(interface, "slave-port", &slave_num);
		if (ret) {
			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
				interface->name);
			continue;
		}
		gbe_dev->num_slaves++;
	}

	if (!gbe_dev->num_slaves)
		dev_warn(dev, "No network interface configured\n");

	/* Initialize Secondary slave ports */
	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
	if (secondary_ports)
		init_secondary_ports(gbe_dev, secondary_ports);
	of_node_put(secondary_ports);

	/* num_slaves may have been bumped by the secondary ports above */
	if (!gbe_dev->num_slaves) {
		dev_err(dev, "No network interface or secondary ports configured\n");
		ret = -ENODEV;
		goto quit;
	}

	memset(&ale_params, 0, sizeof(ale_params));
	ale_params.dev = gbe_dev->dev;
	ale_params.ale_regs = gbe_dev->ale_reg;
	ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
	ale_params.ale_entries = gbe_dev->ale_entries;
	ale_params.ale_ports = gbe_dev->ale_ports;

	gbe_dev->ale = cpsw_ale_create(&ale_params);
	if (!gbe_dev->ale) {
		dev_err(gbe_dev->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto quit;
	} else {
		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
	}

	/* initialize host port */
	gbe_init_host_port(gbe_dev);

	/* Kick off the periodic link-state / stats timer */
	init_timer(&gbe_dev->timer);
	gbe_dev->timer.data = (unsigned long)gbe_dev;
	gbe_dev->timer.function = netcp_ethss_timer;
	gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
	add_timer(&gbe_dev->timer);
	*inst_priv = gbe_dev;
	return 0;

quit:
	/* Free devm resources eagerly: the device itself stays bound */
	if (gbe_dev->hw_stats)
		devm_kfree(dev, gbe_dev->hw_stats);
	cpsw_ale_destroy(gbe_dev->ale);
	if (gbe_dev->ss_regs)
		devm_iounmap(dev, gbe_dev->ss_regs);
	/* of_node_put(NULL) is a no-op, so this is safe on early exits */
	of_node_put(interfaces);
	devm_kfree(dev, gbe_dev);
	return ret;
}
2021
2022static int gbe_attach(void *inst_priv, struct net_device *ndev,
2023 struct device_node *node, void **intf_priv)
2024{
2025 struct gbe_priv *gbe_dev = inst_priv;
2026 struct gbe_intf *gbe_intf;
2027 int ret;
2028
2029 if (!node) {
2030 dev_err(gbe_dev->dev, "interface node not available\n");
2031 return -ENODEV;
2032 }
2033
2034 gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
2035 if (!gbe_intf)
2036 return -ENOMEM;
2037
2038 gbe_intf->ndev = ndev;
2039 gbe_intf->dev = gbe_dev->dev;
2040 gbe_intf->gbe_dev = gbe_dev;
2041
2042 gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
2043 sizeof(*gbe_intf->slave),
2044 GFP_KERNEL);
2045 if (!gbe_intf->slave) {
2046 ret = -ENOMEM;
2047 goto fail;
2048 }
2049
2050 if (init_slave(gbe_dev, gbe_intf->slave, node)) {
2051 ret = -ENODEV;
2052 goto fail;
2053 }
2054
2055 gbe_intf->tx_pipe = gbe_dev->tx_pipe;
2056 ndev->ethtool_ops = &keystone_ethtool_ops;
2057 list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
2058 *intf_priv = gbe_intf;
2059 return 0;
2060
2061fail:
2062 if (gbe_intf->slave)
2063 devm_kfree(gbe_dev->dev, gbe_intf->slave);
2064 if (gbe_intf)
2065 devm_kfree(gbe_dev->dev, gbe_intf);
2066 return ret;
2067}
2068
2069static int gbe_release(void *intf_priv)
2070{
2071 struct gbe_intf *gbe_intf = intf_priv;
2072
2073 gbe_intf->ndev->ethtool_ops = NULL;
2074 list_del(&gbe_intf->gbe_intf_list);
2075 devm_kfree(gbe_intf->dev, gbe_intf->slave);
2076 devm_kfree(gbe_intf->dev, gbe_intf);
2077 return 0;
2078}
2079
2080static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
2081{
2082 struct gbe_priv *gbe_dev = inst_priv;
2083
2084 del_timer_sync(&gbe_dev->timer);
2085 cpsw_ale_stop(gbe_dev->ale);
2086 cpsw_ale_destroy(gbe_dev->ale);
2087 netcp_txpipe_close(&gbe_dev->tx_pipe);
2088 free_secondary_ports(gbe_dev);
2089
2090 if (!list_empty(&gbe_dev->gbe_intf_head))
2091 dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n");
2092
2093 devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
2094 devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
2095 memset(gbe_dev, 0x00, sizeof(*gbe_dev));
2096 devm_kfree(gbe_dev->dev, gbe_dev);
2097 return 0;
2098}
2099
/* Module ops registered with the netcp core for the 1G (GBE) subsystem.
 * Shares every callback with xgbe_module below; only the name under
 * which it registers differs.
 */
static struct netcp_module gbe_module = {
	.name		= GBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};
2116
/* Module ops registered with the netcp core for the 10G (XGBE)
 * subsystem; identical callbacks to gbe_module, different name.
 */
static struct netcp_module xgbe_module = {
	.name		= XGBE_MODULE_NAME,
	.owner		= THIS_MODULE,
	.primary	= true,
	.probe		= gbe_probe,
	.open		= gbe_open,
	.close		= gbe_close,
	.remove		= gbe_remove,
	.attach		= gbe_attach,
	.release	= gbe_release,
	.add_addr	= gbe_add_addr,
	.del_addr	= gbe_del_addr,
	.add_vid	= gbe_add_vid,
	.del_vid	= gbe_del_vid,
	.ioctl		= gbe_ioctl,
};
2133
2134static int __init keystone_gbe_init(void)
2135{
2136 int ret;
2137
2138 ret = netcp_register_module(&gbe_module);
2139 if (ret)
2140 return ret;
2141
2142 ret = netcp_register_module(&xgbe_module);
2143 if (ret)
2144 return ret;
2145
2146 return 0;
2147}
2148module_init(keystone_gbe_init);
2149
/* Module exit: unregister both netcp modules registered in init */
static void __exit keystone_gbe_exit(void)
{
	netcp_unregister_module(&gbe_module);
	netcp_unregister_module(&xgbe_module);
}
2155module_exit(keystone_gbe_exit);
2156
2157MODULE_LICENSE("GPL v2");
2158MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
2159MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com");
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
new file mode 100644
index 000000000000..dbeb14266e2f
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -0,0 +1,131 @@
1/*
2 * SGMI module initialisation
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated
5 * Authors: Sandeep Nair <sandeep_n@ti.com>
6 * Sandeep Paulraj <s-paulraj@ti.com>
7 * Wingman Kwok <w-kwok2@ti.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation version 2.
12 *
13 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
14 * kind, whether express or implied; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include "netcp.h"
20
21#define SGMII_REG_STATUS_LOCK BIT(4)
22#define SGMII_REG_STATUS_LINK BIT(0)
23#define SGMII_REG_STATUS_AUTONEG BIT(2)
24#define SGMII_REG_CONTROL_AUTONEG BIT(0)
25
26#define SGMII23_OFFSET(x) ((x - 2) * 0x100)
27#define SGMII_OFFSET(x) ((x <= 1) ? (x * 0x100) : (SGMII23_OFFSET(x)))
28
29/* SGMII registers */
30#define SGMII_SRESET_REG(x) (SGMII_OFFSET(x) + 0x004)
31#define SGMII_CTL_REG(x) (SGMII_OFFSET(x) + 0x010)
32#define SGMII_STATUS_REG(x) (SGMII_OFFSET(x) + 0x014)
33#define SGMII_MRADV_REG(x) (SGMII_OFFSET(x) + 0x018)
34
/* Write @val to the SGMII register at byte offset @reg from @base */
static void sgmii_write_reg(void __iomem *base, int reg, u32 val)
{
	writel(val, base + reg);
}
39
/* Read the SGMII register at byte offset @reg from @base */
static u32 sgmii_read_reg(void __iomem *base, int reg)
{
	return readl(base + reg);
}
44
45static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
46{
47 writel((readl(base + reg) | val), base + reg);
48}
49
/* port is 0 based */
/* Soft-reset one SGMII port and wait for the reset bit to self-clear.
 * Always returns 0.
 */
int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
{
	/* Soft reset */
	sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1);
	/* NOTE(review): unbounded busy-wait with no cpu_relax() or
	 * timeout — presumably the reset self-clears promptly on real
	 * hardware; confirm, or add a bounded wait.
	 */
	while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0)
		;
	return 0;
}
59
60int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
61{
62 u32 status = 0, link = 0;
63
64 status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
65 if ((status & SGMII_REG_STATUS_LINK) != 0)
66 link = 1;
67 return link;
68}
69
/* Configure one SGMII port (0-based) for the requested link @interface
 * type: picks the advertised-ability and control values, waits for the
 * SerDes PLL lock, programs the port, then polls briefly for link (and
 * autoneg completion when autoneg is enabled).
 * Returns 0 on success (even if link does not come up within the poll
 * window) or -EINVAL for an unknown interface type.
 */
int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface)
{
	unsigned int i, status, mask;
	u32 mr_adv_ability;
	u32 control;

	/* mr_adv_ability/control values are hardware-specific encodings
	 * per link mode; presumably from the Keystone SGMII user guide —
	 * confirm against TI documentation before changing.
	 */
	switch (interface) {
	case SGMII_LINK_MAC_MAC_AUTONEG:
		mr_adv_ability	= 0x9801;
		control		= 0x21;
		break;

	case SGMII_LINK_MAC_PHY:
	case SGMII_LINK_MAC_PHY_NO_MDIO:
		mr_adv_ability	= 1;
		control		= 1;
		break;

	case SGMII_LINK_MAC_MAC_FORCED:
		mr_adv_ability	= 0x9801;
		control		= 0x20;
		break;

	case SGMII_LINK_MAC_FIBER:
		mr_adv_ability	= 0x20;
		control		= 0x1;
		break;

	default:
		WARN_ONCE(1, "Invalid sgmii interface: %d\n", interface);
		return -EINVAL;
	}

	/* Disable the port while reconfiguring */
	sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), 0);

	/* Wait for the SerDes pll to lock */
	for (i = 0; i < 1000; i++) {
		usleep_range(1000, 2000);
		status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
		if ((status & SGMII_REG_STATUS_LOCK) != 0)
			break;
	}

	/* Lock failure is logged but not fatal; configuration proceeds */
	if ((status & SGMII_REG_STATUS_LOCK) == 0)
		pr_err("serdes PLL not locked\n");

	sgmii_write_reg(sgmii_ofs, SGMII_MRADV_REG(port), mr_adv_ability);
	sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), control);

	/* When autoneg is enabled, wait for it to complete as well */
	mask = SGMII_REG_STATUS_LINK;
	if (control & SGMII_REG_CONTROL_AUTONEG)
		mask |= SGMII_REG_STATUS_AUTONEG;

	/* Best-effort link poll; a down link is not treated as an error */
	for (i = 0; i < 1000; i++) {
		usleep_range(200, 500);
		status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
		if ((status & mask) == mask)
			break;
	}

	return 0;
}
diff --git a/drivers/net/ethernet/ti/netcp_xgbepcsr.c b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
new file mode 100644
index 000000000000..33571acc52b6
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
@@ -0,0 +1,501 @@
1/*
2 * XGE PCSR module initialisation
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated
5 * Authors: Sandeep Nair <sandeep_n@ti.com>
6 * WingMan Kwok <w-kwok2@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation version 2.
11 *
12 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
13 * kind, whether express or implied; without even the implied warranty
14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17#include "netcp.h"
18
/* XGBE registers */
#define XGBE_CTRL_OFFSET	0x0c
#define XGBE_SGMII_1_OFFSET	0x0114
#define XGBE_SGMII_2_OFFSET	0x0214

/* PCS-R registers */
#define PCSR_CPU_CTRL_OFFSET	0x1fd0
#define POR_EN			BIT(29)

/* Read-modify-write a register: only the bits in @mask are replaced
 * with the corresponding bits of @value; all other bits are preserved.
 * NOTE: @addr is evaluated twice.
 */
#define reg_rmw(addr, value, mask) \
	writel(((readl(addr) & (~(mask))) | \
			(value & (mask))), (addr))

/* bit mask of width w at offset s */
#define MASK_WID_SH(w, s)	(((1 << w) - 1) << s)

/* shift value v to offset s */
#define VAL_SH(v, s)		(v << s)

/* This build always treats the serdes as PHY-B; PHY_A() is constant 0,
 * so the PHY-A branches below are dead code kept for reference.
 */
#define PHY_A(serdes)		0

/* One register patch entry: apply (val & mask) at offset ofs from the
 * serdes register base, preserving bits outside mask (see reg_rmw()).
 */
struct serdes_cfg {
	u32 ofs;	/* register offset from the serdes base */
	u32 val;	/* new bit values (only bits inside mask matter) */
	u32 mask;	/* which bits this entry updates */
};
45
/* Hardware-specific serdes register patch tables. The values are opaque
 * vendor-provided initialisation data — presumably from TI serdes
 * configuration material; do not modify without reference documentation.
 */

/* CMU0 setup for 1.25 Gbaud operation at 156.25 MHz reference clock */
static struct serdes_cfg cfg_phyb_1p25g_156p25mhz_cmu0[] = {
	{0x0000, 0x00800002, 0x00ff00ff},
	{0x0014, 0x00003838, 0x0000ffff},
	{0x0060, 0x1c44e438, 0xffffffff},
	{0x0064, 0x00c18400, 0x00ffffff},
	{0x0068, 0x17078200, 0xffffff00},
	{0x006c, 0x00000014, 0x000000ff},
	{0x0078, 0x0000c000, 0x0000ff00},
	{0x0000, 0x00000003, 0x000000ff},
};

/* CMU1 setup for 10.3125 Gbaud operation at 156.25 MHz reference clock */
static struct serdes_cfg cfg_phyb_10p3125g_156p25mhz_cmu1[] = {
	{0x0c00, 0x00030002, 0x00ff00ff},
	{0x0c14, 0x00005252, 0x0000ffff},
	{0x0c28, 0x80000000, 0xff000000},
	{0x0c2c, 0x000000f6, 0x000000ff},
	{0x0c3c, 0x04000405, 0xff00ffff},
	{0x0c40, 0xc0800000, 0xffff0000},
	{0x0c44, 0x5a202062, 0xffffffff},
	{0x0c48, 0x40040424, 0xffffffff},
	{0x0c4c, 0x00004002, 0x0000ffff},
	{0x0c50, 0x19001c00, 0xff00ff00},
	{0x0c54, 0x00002100, 0x0000ff00},
	{0x0c58, 0x00000060, 0x000000ff},
	{0x0c60, 0x80131e7c, 0xffffffff},
	{0x0c64, 0x8400cb02, 0xff00ffff},
	{0x0c68, 0x17078200, 0xffffff00},
	{0x0c6c, 0x00000016, 0x000000ff},
	{0x0c74, 0x00000400, 0x0000ff00},
	{0x0c78, 0x0000c000, 0x0000ff00},
	{0x0c00, 0x00000003, 0x000000ff},
};

/* Per-lane setup for 10.3125 Gbaud, 16-bit lane width. Offsets are
 * relative to lane 0; callers add 0x200 per lane.
 */
static struct serdes_cfg cfg_phyb_10p3125g_16bit_lane[] = {
	{0x0204, 0x00000080, 0x000000ff},
	{0x0208, 0x0000920d, 0x0000ffff},
	{0x0204, 0xfc000000, 0xff000000},
	{0x0208, 0x00009104, 0x0000ffff},
	{0x0210, 0x1a000000, 0xff000000},
	{0x0214, 0x00006b58, 0x00ffffff},
	{0x0218, 0x75800084, 0xffff00ff},
	{0x022c, 0x00300000, 0x00ff0000},
	{0x0230, 0x00003800, 0x0000ff00},
	{0x024c, 0x008f0000, 0x00ff0000},
	{0x0250, 0x30000000, 0xff000000},
	{0x0260, 0x00000002, 0x000000ff},
	{0x0264, 0x00000057, 0x000000ff},
	{0x0268, 0x00575700, 0x00ffff00},
	{0x0278, 0xff000000, 0xff000000},
	{0x0280, 0x00500050, 0x00ff00ff},
	{0x0284, 0x00001f15, 0x0000ffff},
	{0x028c, 0x00006f00, 0x0000ff00},
	{0x0294, 0x00000000, 0xffffff00},
	{0x0298, 0x00002640, 0xff00ffff},
	{0x029c, 0x00000003, 0x000000ff},
	{0x02a4, 0x00000f13, 0x0000ffff},
	{0x02a8, 0x0001b600, 0x00ffff00},
	{0x0380, 0x00000030, 0x000000ff},
	{0x03c0, 0x00000200, 0x0000ff00},
	{0x03cc, 0x00000018, 0x000000ff},
	{0x03cc, 0x00000000, 0x000000ff},
};

/* Common-lane (comlane) block setup for 10.3125 Gbaud */
static struct serdes_cfg cfg_phyb_10p3125g_comlane[] = {
	{0x0a00, 0x00000800, 0x0000ff00},
	{0x0a84, 0x00000000, 0x000000ff},
	{0x0a8c, 0x00130000, 0x00ff0000},
	{0x0a90, 0x77a00000, 0xffff0000},
	{0x0a94, 0x00007777, 0x0000ffff},
	{0x0b08, 0x000f0000, 0xffff0000},
	{0x0b0c, 0x000f0000, 0x00ffffff},
	{0x0b10, 0xbe000000, 0xff000000},
	{0x0b14, 0x000000ff, 0x000000ff},
	{0x0b18, 0x00000014, 0x000000ff},
	{0x0b5c, 0x981b0000, 0xffff0000},
	{0x0b64, 0x00001100, 0x0000ff00},
	{0x0b78, 0x00000c00, 0x0000ff00},
	{0x0abc, 0xff000000, 0xff000000},
	{0x0ac0, 0x0000008b, 0x000000ff},
};

/* Per-lane CM/C1/C2 coefficient patches, used by
 * netcp_xgbe_serdes_setup_cm_c1_c2(); offsets are lane-0 relative.
 */
static struct serdes_cfg cfg_cm_c1_c2[] = {
	{0x0208, 0x00000000, 0x00000f00},
	{0x0208, 0x00000000, 0x0000001f},
	{0x0204, 0x00000000, 0x00040000},
	{0x0208, 0x000000a0, 0x000000e0},
};
133
134static void netcp_xgbe_serdes_cmu_init(void __iomem *serdes_regs)
135{
136 int i;
137
138 /* cmu0 setup */
139 for (i = 0; i < ARRAY_SIZE(cfg_phyb_1p25g_156p25mhz_cmu0); i++) {
140 reg_rmw(serdes_regs + cfg_phyb_1p25g_156p25mhz_cmu0[i].ofs,
141 cfg_phyb_1p25g_156p25mhz_cmu0[i].val,
142 cfg_phyb_1p25g_156p25mhz_cmu0[i].mask);
143 }
144
145 /* cmu1 setup */
146 for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_156p25mhz_cmu1); i++) {
147 reg_rmw(serdes_regs + cfg_phyb_10p3125g_156p25mhz_cmu1[i].ofs,
148 cfg_phyb_10p3125g_156p25mhz_cmu1[i].val,
149 cfg_phyb_10p3125g_156p25mhz_cmu1[i].mask);
150 }
151}
152
153/* lane is 0 based */
154static void netcp_xgbe_serdes_lane_config(
155 void __iomem *serdes_regs, int lane)
156{
157 int i;
158
159 /* lane setup */
160 for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_16bit_lane); i++) {
161 reg_rmw(serdes_regs +
162 cfg_phyb_10p3125g_16bit_lane[i].ofs +
163 (0x200 * lane),
164 cfg_phyb_10p3125g_16bit_lane[i].val,
165 cfg_phyb_10p3125g_16bit_lane[i].mask);
166 }
167
168 /* disable auto negotiation*/
169 reg_rmw(serdes_regs + (0x200 * lane) + 0x0380,
170 0x00000000, 0x00000010);
171
172 /* disable link training */
173 reg_rmw(serdes_regs + (0x200 * lane) + 0x03c0,
174 0x00000000, 0x00000200);
175}
176
177static void netcp_xgbe_serdes_com_enable(void __iomem *serdes_regs)
178{
179 int i;
180
181 for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_comlane); i++) {
182 reg_rmw(serdes_regs + cfg_phyb_10p3125g_comlane[i].ofs,
183 cfg_phyb_10p3125g_comlane[i].val,
184 cfg_phyb_10p3125g_comlane[i].mask);
185 }
186}
187
188static void netcp_xgbe_serdes_lane_enable(
189 void __iomem *serdes_regs, int lane)
190{
191 /* Set Lane Control Rate */
192 writel(0xe0e9e038, serdes_regs + 0x1fe0 + (4 * lane));
193}
194
/* Clear the PHY-B reset: write 0x1f into the low byte of the comlane
 * control register at 0x0a00.
 */
static void netcp_xgbe_serdes_phyb_rst_clr(void __iomem *serdes_regs)
{
	reg_rmw(serdes_regs + 0x0a00, 0x0000001f, 0x000000ff);
}
199
/* Write the PLL control register at 0x1ff4 with its "disabled" value
 * (0x88000000); counterpart of netcp_xgbe_serdes_pll_enable().
 */
static void netcp_xgbe_serdes_pll_disable(void __iomem *serdes_regs)
{
	writel(0x88000000, serdes_regs + 0x1ff4);
}
204
/* Release the PHY-B reset, then write the PLL control register at
 * 0x1ff4 with its "enabled" value (0xee000000).
 */
static void netcp_xgbe_serdes_pll_enable(void __iomem *serdes_regs)
{
	netcp_xgbe_serdes_phyb_rst_clr(serdes_regs);
	writel(0xee000000, serdes_regs + 0x1ff4);
}
210
211static int netcp_xgbe_wait_pll_locked(void __iomem *sw_regs)
212{
213 unsigned long timeout;
214 int ret = 0;
215 u32 val_1, val_0;
216
217 timeout = jiffies + msecs_to_jiffies(500);
218 do {
219 val_0 = (readl(sw_regs + XGBE_SGMII_1_OFFSET) & BIT(4));
220 val_1 = (readl(sw_regs + XGBE_SGMII_2_OFFSET) & BIT(4));
221
222 if (val_1 && val_0)
223 return 0;
224
225 if (time_after(jiffies, timeout)) {
226 ret = -ETIMEDOUT;
227 break;
228 }
229
230 cpu_relax();
231 } while (true);
232
233 pr_err("XGBE serdes not locked: time out.\n");
234 return ret;
235}
236
/* Write 0x03 into the XGBE control register — presumably enables both
 * XGMII ports (one bit per port); confirm against the switch TRM.
 */
static void netcp_xgbe_serdes_enable_xgmii_port(void __iomem *sw_regs)
{
	writel(0x03, sw_regs + XGBE_CTRL_OFFSET);
}
241
242static u32 netcp_xgbe_serdes_read_tbus_val(void __iomem *serdes_regs)
243{
244 u32 tmp;
245
246 if (PHY_A(serdes_regs)) {
247 tmp = (readl(serdes_regs + 0x0ec) >> 24) & 0x0ff;
248 tmp |= ((readl(serdes_regs + 0x0fc) >> 16) & 0x00f00);
249 } else {
250 tmp = (readl(serdes_regs + 0x0f8) >> 16) & 0x0fff;
251 }
252
253 return tmp;
254}
255
256static void netcp_xgbe_serdes_write_tbus_addr(void __iomem *serdes_regs,
257 int select, int ofs)
258{
259 if (PHY_A(serdes_regs)) {
260 reg_rmw(serdes_regs + 0x0008, ((select << 5) + ofs) << 24,
261 ~0x00ffffff);
262 return;
263 }
264
265 /* For 2 lane Phy-B, lane0 is actually lane1 */
266 switch (select) {
267 case 1:
268 select = 2;
269 break;
270 case 2:
271 select = 3;
272 break;
273 default:
274 return;
275 }
276
277 reg_rmw(serdes_regs + 0x00fc, ((select << 8) + ofs) << 16, ~0xf800ffff);
278}
279
280static u32 netcp_xgbe_serdes_read_select_tbus(void __iomem *serdes_regs,
281 int select, int ofs)
282{
283 /* Set tbus address */
284 netcp_xgbe_serdes_write_tbus_addr(serdes_regs, select, ofs);
285 /* Get TBUS Value */
286 return netcp_xgbe_serdes_read_tbus_val(serdes_regs);
287}
288
/* Reset the clock/data recovery (CDR) block for @lane when its DLPF
 * reading (via the test bus) is outside the 400-700 window; otherwise
 * just log that the CDR is centered.
 */
static void netcp_xgbe_serdes_reset_cdr(void __iomem *serdes_regs,
					void __iomem *sig_detect_reg, int lane)
{
	u32 tmp, dlpf, tbus;

	/* Get the DLPF values */
	tmp = netcp_xgbe_serdes_read_select_tbus(
			serdes_regs, lane + 1, 5);

	dlpf = tmp >> 2;

	if (dlpf < 400 || dlpf > 700) {
		/* Off-center: pulse the 2-bit signal-detect override
		 * (set to 2, wait 1 ms, clear) to force a CDR reset.
		 */
		reg_rmw(sig_detect_reg, VAL_SH(2, 1), MASK_WID_SH(2, 1));
		mdelay(1);
		reg_rmw(sig_detect_reg, VAL_SH(0, 1), MASK_WID_SH(2, 1));
	} else {
		tbus = netcp_xgbe_serdes_read_select_tbus(serdes_regs, lane +
							  1, 0xe);

		pr_debug("XGBE: CDR centered, DLPF: %4d,%d,%d.\n",
			 tmp >> 2, tmp & 3, (tbus >> 2) & 3);
	}
}
312
/* Per-lane link supervision state machine. Call every 100 ms.
 *
 * current_state[i] tracks each lane: 0 = acquiring, 1 = linked,
 * 2 = block lock transiently lost. lane_down[i] is set to 1 when a
 * linked lane drops out. Returns 1 only when every lane is in
 * state 1, else 0.
 */
static int netcp_xgbe_check_link_status(void __iomem *serdes_regs,
					void __iomem *sw_regs, u32 lanes,
					u32 *current_state, u32 *lane_down)
{
	void __iomem *pcsr_base = sw_regs + 0x0600;
	void __iomem *sig_detect_reg;
	u32 pcsr_rx_stat, blk_lock, blk_errs;
	int loss, i, status = 1;

	for (i = 0; i < lanes; i++) {
		/* Get the Loss bit */
		loss = readl(serdes_regs + 0x1fc0 + 0x20 + (i * 0x04)) & 0x1;

		/* Get Block Errors and Block Lock bits */
		pcsr_rx_stat = readl(pcsr_base + 0x0c + (i * 0x80));
		blk_lock = (pcsr_rx_stat >> 30) & 0x1;
		blk_errs = (pcsr_rx_stat >> 16) & 0x0ff;

		/* Get Signal Detect Overlay Address */
		sig_detect_reg = serdes_regs + (i * 0x200) + 0x200 + 0x04;

		/* If Block errors maxed out, treat the lane as unlocked
		 * so the state machine below attempts recovery.
		 */
		if (blk_errs == 0x0ff)
			blk_lock = 0;

		switch (current_state[i]) {
		case 0:
			/* acquiring: on good signal + lock, latch the
			 * signal detect override ON and mark linked
			 */
			if (!loss && blk_lock) {
				pr_debug("XGBE PCSR Linked Lane: %d\n", i);
				reg_rmw(sig_detect_reg, VAL_SH(3, 1),
					MASK_WID_SH(2, 1));
				current_state[i] = 1;
			} else if (!blk_lock) {
				/* if no lock, then reset CDR */
				pr_debug("XGBE PCSR Recover Lane: %d\n", i);
				netcp_xgbe_serdes_reset_cdr(serdes_regs,
							    sig_detect_reg, i);
			}
			break;

		case 1:
			if (!blk_lock) {
				/* Link Lost? flag it and re-check next tick */
				lane_down[i] = 1;
				current_state[i] = 2;
			}
			break;

		case 2:
			if (blk_lock)
				/* Nope just noise */
				current_state[i] = 1;
			else {
				/* Lost the block lock, reset CDR if it is
				 * not centered and go back to sync state
				 */
				netcp_xgbe_serdes_reset_cdr(serdes_regs,
							    sig_detect_reg, i);
				current_state[i] = 0;
			}
			break;

		default:
			pr_err("XGBE: unknown current_state[%d] %d\n",
			       i, current_state[i]);
			break;
		}

		if (blk_errs > 0) {
			/* Reset the error counter by writing 0x19 then 0
			 * into its low byte
			 */
			reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x19, 0),
				MASK_WID_SH(8, 0));

			reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x00, 0),
				MASK_WID_SH(8, 0));
		}

		/* overall status stays 1 only while every lane is linked */
		status &= (current_state[i] == 1);
	}

	return status;
}
397
398static int netcp_xgbe_serdes_check_lane(void __iomem *serdes_regs,
399 void __iomem *sw_regs)
400{
401 u32 current_state[2] = {0, 0};
402 int retries = 0, link_up;
403 u32 lane_down[2];
404
405 do {
406 lane_down[0] = 0;
407 lane_down[1] = 0;
408
409 link_up = netcp_xgbe_check_link_status(serdes_regs, sw_regs, 2,
410 current_state,
411 lane_down);
412
413 /* if we did not get link up then wait 100ms before calling
414 * it again
415 */
416 if (link_up)
417 break;
418
419 if (lane_down[0])
420 pr_debug("XGBE: detected link down on lane 0\n");
421
422 if (lane_down[1])
423 pr_debug("XGBE: detected link down on lane 1\n");
424
425 if (++retries > 1) {
426 pr_debug("XGBE: timeout waiting for serdes link up\n");
427 return -ETIMEDOUT;
428 }
429 mdelay(100);
430 } while (!link_up);
431
432 pr_debug("XGBE: PCSR link is up\n");
433 return 0;
434}
435
/* Apply the CM/C1/C2 coefficient register patches to @lane.
 *
 * NOTE(review): the cm, c1 and c2 parameters are never read — every
 * value written comes from the fixed cfg_cm_c1_c2[] table. Presumably
 * the table encodes the (0, 0, 5) setting the only caller passes;
 * confirm before relying on these parameters to take effect.
 */
static void netcp_xgbe_serdes_setup_cm_c1_c2(void __iomem *serdes_regs,
					     int lane, int cm, int c1, int c2)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cfg_cm_c1_c2); i++) {
		reg_rmw(serdes_regs + cfg_cm_c1_c2[i].ofs + (0x200 * lane),
			cfg_cm_c1_c2[i].val,
			cfg_cm_c1_c2[i].mask);
	}
}
447
448static void netcp_xgbe_reset_serdes(void __iomem *serdes_regs)
449{
450 /* Toggle the POR_EN bit in CONFIG.CPU_CTRL */
451 /* enable POR_EN bit */
452 reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, POR_EN, POR_EN);
453 usleep_range(10, 100);
454
455 /* disable POR_EN bit */
456 reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, 0, POR_EN);
457 usleep_range(10, 100);
458}
459
460static int netcp_xgbe_serdes_config(void __iomem *serdes_regs,
461 void __iomem *sw_regs)
462{
463 u32 ret, i;
464
465 netcp_xgbe_serdes_pll_disable(serdes_regs);
466 netcp_xgbe_serdes_cmu_init(serdes_regs);
467
468 for (i = 0; i < 2; i++)
469 netcp_xgbe_serdes_lane_config(serdes_regs, i);
470
471 netcp_xgbe_serdes_com_enable(serdes_regs);
472 /* This is EVM + RTM-BOC specific */
473 for (i = 0; i < 2; i++)
474 netcp_xgbe_serdes_setup_cm_c1_c2(serdes_regs, i, 0, 0, 5);
475
476 netcp_xgbe_serdes_pll_enable(serdes_regs);
477 for (i = 0; i < 2; i++)
478 netcp_xgbe_serdes_lane_enable(serdes_regs, i);
479
480 /* SB PLL Status Poll */
481 ret = netcp_xgbe_wait_pll_locked(sw_regs);
482 if (ret)
483 return ret;
484
485 netcp_xgbe_serdes_enable_xgmii_port(sw_regs);
486 netcp_xgbe_serdes_check_lane(serdes_regs, sw_regs);
487 return ret;
488}
489
490int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs)
491{
492 u32 val;
493
494 /* read COMLANE bits 4:0 */
495 val = readl(serdes_regs + 0xa00);
496 if (val & 0x1f) {
497 pr_debug("XGBE: serdes already in operation - reset\n");
498 netcp_xgbe_reset_serdes(serdes_regs);
499 }
500 return netcp_xgbe_serdes_config(serdes_regs, xgbe_regs);
501}
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index f2ff0074aac9..691ec936e88d 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -2540,7 +2540,7 @@ static void tlan_phy_power_down(struct net_device *dev)
2540 * This is abitrary. It is intended to make sure the 2540 * This is abitrary. It is intended to make sure the
2541 * transceiver settles. 2541 * transceiver settles.
2542 */ 2542 */
2543 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP); 2543 tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
2544 2544
2545} 2545}
2546 2546
@@ -2561,7 +2561,7 @@ static void tlan_phy_power_up(struct net_device *dev)
2561 * transceiver. The TLAN docs say both 50 ms and 2561 * transceiver. The TLAN docs say both 50 ms and
2562 * 500 ms, so do the longer, just in case. 2562 * 500 ms, so do the longer, just in case.
2563 */ 2563 */
2564 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET); 2564 tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_RESET);
2565 2565
2566} 2566}
2567 2567
@@ -2593,7 +2593,7 @@ static void tlan_phy_reset(struct net_device *dev)
2593 * I don't remember why I wait this long. 2593 * I don't remember why I wait this long.
2594 * I've changed this to 50ms, as it seems long enough. 2594 * I've changed this to 50ms, as it seems long enough.
2595 */ 2595 */
2596 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK); 2596 tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_START_LINK);
2597 2597
2598} 2598}
2599 2599
@@ -2658,7 +2658,7 @@ static void tlan_phy_start_link(struct net_device *dev)
2658 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN 2658 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
2659 | TLAN_NET_CFG_PHY_EN; 2659 | TLAN_NET_CFG_PHY_EN;
2660 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data); 2660 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
2661 tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN); 2661 tlan_set_timer(dev, msecs_to_jiffies(40), TLAN_TIMER_PHY_PDOWN);
2662 return; 2662 return;
2663 } else if (priv->phy_num == 0) { 2663 } else if (priv->phy_num == 0) {
2664 control = 0; 2664 control = 0;
@@ -2725,7 +2725,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
2725 (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) && 2725 (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
2726 (priv->phy_num != 0)) { 2726 (priv->phy_num != 0)) {
2727 priv->phy_num = 0; 2727 priv->phy_num = 0;
2728 tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN); 2728 tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN);
2729 return; 2729 return;
2730 } 2730 }
2731 2731
@@ -2744,7 +2744,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
2744 2744
2745 /* Wait for 100 ms. No reason in partiticular. 2745 /* Wait for 100 ms. No reason in partiticular.
2746 */ 2746 */
2747 tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET); 2747 tlan_set_timer(dev, msecs_to_jiffies(100), TLAN_TIMER_FINISH_RESET);
2748 2748
2749} 2749}
2750 2750
@@ -2796,7 +2796,7 @@ static void tlan_phy_monitor(unsigned long data)
2796 /* set to external PHY */ 2796 /* set to external PHY */
2797 priv->phy_num = 1; 2797 priv->phy_num = 1;
2798 /* restart autonegotiation */ 2798 /* restart autonegotiation */
2799 tlan_set_timer(dev, 4 * HZ / 10, 2799 tlan_set_timer(dev, msecs_to_jiffies(400),
2800 TLAN_TIMER_PHY_PDOWN); 2800 TLAN_TIMER_PHY_PDOWN);
2801 return; 2801 return;
2802 } 2802 }
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index a191afc23b56..17e276651601 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1326,7 +1326,8 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1326 struct rhine_private *rp = netdev_priv(dev); 1326 struct rhine_private *rp = netdev_priv(dev);
1327 void __iomem *ioaddr = rp->base; 1327 void __iomem *ioaddr = rp->base;
1328 1328
1329 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media); 1329 if (!rp->mii_if.force_media)
1330 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1330 1331
1331 if (rp->mii_if.full_duplex) 1332 if (rp->mii_if.full_duplex)
1332 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex, 1333 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
@@ -1781,8 +1782,8 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1781 rp->tx_ring[entry].desc_length = 1782 rp->tx_ring[entry].desc_length =
1782 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); 1783 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1783 1784
1784 if (unlikely(vlan_tx_tag_present(skb))) { 1785 if (unlikely(skb_vlan_tag_present(skb))) {
1785 u16 vid_pcp = vlan_tx_tag_get(skb); 1786 u16 vid_pcp = skb_vlan_tag_get(skb);
1786 1787
1787 /* drop CFI/DEI bit, register needs VID and PCP */ 1788 /* drop CFI/DEI bit, register needs VID and PCP */
1788 vid_pcp = (vid_pcp & VLAN_VID_MASK) | 1789 vid_pcp = (vid_pcp & VLAN_VID_MASK) |
@@ -1803,7 +1804,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1803 1804
1804 /* Non-x86 Todo: explicitly flush cache lines here. */ 1805 /* Non-x86 Todo: explicitly flush cache lines here. */
1805 1806
1806 if (vlan_tx_tag_present(skb)) 1807 if (skb_vlan_tag_present(skb))
1807 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ 1808 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1808 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake); 1809 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1809 1810
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 282f83a63b67..c20206f83cc1 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2611,8 +2611,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2611 2611
2612 td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16; 2612 td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2613 2613
2614 if (vlan_tx_tag_present(skb)) { 2614 if (skb_vlan_tag_present(skb)) {
2615 td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 2615 td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
2616 td_ptr->tdesc1.TCR |= TCR0_VETAG; 2616 td_ptr->tdesc1.TCR |= TCR0_VETAG;
2617 } 2617 }
2618 2618
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index 9edada85ed02..cd78b7cacc75 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -1736,18 +1736,6 @@ char *addr_to_string(struct fddi_addr *addr)
1736} 1736}
1737#endif 1737#endif
1738 1738
1739#ifdef AM29K
1740int smt_ifconfig(int argc, char *argv[])
1741{
1742 if (argc >= 2 && !strcmp(argv[0],"opt_bypass") &&
1743 !strcmp(argv[1],"yes")) {
1744 smc->mib.fddiSMTBypassPresent = 1 ;
1745 return 0;
1746 }
1747 return amdfddi_config(0, argc, argv);
1748}
1749#endif
1750
1751/* 1739/*
1752 * return static mac index 1740 * return static mac index
1753 */ 1741 */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 7cd4eb38abfa..208eb05446ba 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -217,7 +217,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
217static int netvsc_init_buf(struct hv_device *device) 217static int netvsc_init_buf(struct hv_device *device)
218{ 218{
219 int ret = 0; 219 int ret = 0;
220 int t; 220 unsigned long t;
221 struct netvsc_device *net_device; 221 struct netvsc_device *net_device;
222 struct nvsp_message *init_packet; 222 struct nvsp_message *init_packet;
223 struct net_device *ndev; 223 struct net_device *ndev;
@@ -409,7 +409,8 @@ static int negotiate_nvsp_ver(struct hv_device *device,
409 struct nvsp_message *init_packet, 409 struct nvsp_message *init_packet,
410 u32 nvsp_ver) 410 u32 nvsp_ver)
411{ 411{
412 int ret, t; 412 int ret;
413 unsigned long t;
413 414
414 memset(init_packet, 0, sizeof(struct nvsp_message)); 415 memset(init_packet, 0, sizeof(struct nvsp_message));
415 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT; 416 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
@@ -684,9 +685,9 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
684 return ret_val; 685 return ret_val;
685} 686}
686 687
687u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, 688static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
688 unsigned int section_index, 689 unsigned int section_index,
689 struct hv_netvsc_packet *packet) 690 struct hv_netvsc_packet *packet)
690{ 691{
691 char *start = net_device->send_buf; 692 char *start = net_device->send_buf;
692 char *dest = (start + (section_index * net_device->send_section_size)); 693 char *dest = (start + (section_index * net_device->send_section_size));
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index ec0c40a8f653..7816d98bdddc 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -470,7 +470,7 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
470 struct rndis_query_request *query; 470 struct rndis_query_request *query;
471 struct rndis_query_complete *query_complete; 471 struct rndis_query_complete *query_complete;
472 int ret = 0; 472 int ret = 0;
473 int t; 473 unsigned long t;
474 474
475 if (!result) 475 if (!result)
476 return -EINVAL; 476 return -EINVAL;
@@ -560,7 +560,8 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
560 char macstr[2*ETH_ALEN+1]; 560 char macstr[2*ETH_ALEN+1];
561 u32 extlen = sizeof(struct rndis_config_parameter_info) + 561 u32 extlen = sizeof(struct rndis_config_parameter_info) +
562 2*NWADR_STRLEN + 4*ETH_ALEN; 562 2*NWADR_STRLEN + 4*ETH_ALEN;
563 int ret, t; 563 int ret;
564 unsigned long t;
564 565
565 request = get_rndis_request(rdev, RNDIS_MSG_SET, 566 request = get_rndis_request(rdev, RNDIS_MSG_SET,
566 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen); 567 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
@@ -623,7 +624,8 @@ cleanup:
623 return ret; 624 return ret;
624} 625}
625 626
626int rndis_filter_set_offload_params(struct hv_device *hdev, 627static int
628rndis_filter_set_offload_params(struct hv_device *hdev,
627 struct ndis_offload_params *req_offloads) 629 struct ndis_offload_params *req_offloads)
628{ 630{
629 struct netvsc_device *nvdev = hv_get_drvdata(hdev); 631 struct netvsc_device *nvdev = hv_get_drvdata(hdev);
@@ -634,7 +636,8 @@ int rndis_filter_set_offload_params(struct hv_device *hdev,
634 struct ndis_offload_params *offload_params; 636 struct ndis_offload_params *offload_params;
635 struct rndis_set_complete *set_complete; 637 struct rndis_set_complete *set_complete;
636 u32 extlen = sizeof(struct ndis_offload_params); 638 u32 extlen = sizeof(struct ndis_offload_params);
637 int ret, t; 639 int ret;
640 unsigned long t;
638 u32 vsp_version = nvdev->nvsp_version; 641 u32 vsp_version = nvdev->nvsp_version;
639 642
640 if (vsp_version <= NVSP_PROTOCOL_VERSION_4) { 643 if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
@@ -697,7 +700,7 @@ u8 netvsc_hash_key[HASH_KEYLEN] = {
697 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa 700 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
698}; 701};
699 702
700int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) 703static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
701{ 704{
702 struct net_device *ndev = rdev->net_dev->ndev; 705 struct net_device *ndev = rdev->net_dev->ndev;
703 struct rndis_request *request; 706 struct rndis_request *request;
@@ -708,7 +711,8 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
708 struct ndis_recv_scale_param *rssp; 711 struct ndis_recv_scale_param *rssp;
709 u32 *itab; 712 u32 *itab;
710 u8 *keyp; 713 u8 *keyp;
711 int i, t, ret; 714 int i, ret;
715 unsigned long t;
712 716
713 request = get_rndis_request( 717 request = get_rndis_request(
714 rdev, RNDIS_MSG_SET, 718 rdev, RNDIS_MSG_SET,
@@ -792,7 +796,8 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
792 struct rndis_set_request *set; 796 struct rndis_set_request *set;
793 struct rndis_set_complete *set_complete; 797 struct rndis_set_complete *set_complete;
794 u32 status; 798 u32 status;
795 int ret, t; 799 int ret;
800 unsigned long t;
796 struct net_device *ndev; 801 struct net_device *ndev;
797 802
798 ndev = dev->net_dev->ndev; 803 ndev = dev->net_dev->ndev;
@@ -848,7 +853,8 @@ static int rndis_filter_init_device(struct rndis_device *dev)
848 struct rndis_initialize_request *init; 853 struct rndis_initialize_request *init;
849 struct rndis_initialize_complete *init_complete; 854 struct rndis_initialize_complete *init_complete;
850 u32 status; 855 u32 status;
851 int ret, t; 856 int ret;
857 unsigned long t;
852 858
853 request = get_rndis_request(dev, RNDIS_MSG_INIT, 859 request = get_rndis_request(dev, RNDIS_MSG_INIT,
854 RNDIS_MESSAGE_SIZE(struct rndis_initialize_request)); 860 RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
@@ -998,7 +1004,7 @@ int rndis_filter_device_add(struct hv_device *dev,
998 struct netvsc_device_info *device_info = additional_info; 1004 struct netvsc_device_info *device_info = additional_info;
999 struct ndis_offload_params offloads; 1005 struct ndis_offload_params offloads;
1000 struct nvsp_message *init_packet; 1006 struct nvsp_message *init_packet;
1001 int t; 1007 unsigned long t;
1002 struct ndis_recv_scale_cap rsscap; 1008 struct ndis_recv_scale_cap rsscap;
1003 u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); 1009 u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
1004 u32 mtu, size; 1010 u32 mtu, size;
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 1c0135620c62..7b051eacb7f1 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -427,7 +427,7 @@ at86rf230_reg_precious(struct device *dev, unsigned int reg)
427 } 427 }
428} 428}
429 429
430static struct regmap_config at86rf230_regmap_spi_config = { 430static const struct regmap_config at86rf230_regmap_spi_config = {
431 .reg_bits = 8, 431 .reg_bits = 8,
432 .val_bits = 8, 432 .val_bits = 8,
433 .write_flag_mask = CMD_REG | CMD_WRITE, 433 .write_flag_mask = CMD_REG | CMD_WRITE,
@@ -450,7 +450,7 @@ at86rf230_async_error_recover(void *context)
450 ieee802154_wake_queue(lp->hw); 450 ieee802154_wake_queue(lp->hw);
451} 451}
452 452
453static void 453static inline void
454at86rf230_async_error(struct at86rf230_local *lp, 454at86rf230_async_error(struct at86rf230_local *lp,
455 struct at86rf230_state_change *ctx, int rc) 455 struct at86rf230_state_change *ctx, int rc)
456{ 456{
@@ -524,7 +524,6 @@ at86rf230_async_state_assert(void *context)
524 } 524 }
525 } 525 }
526 526
527
528 dev_warn(&lp->spi->dev, "unexcept state change from 0x%02x to 0x%02x. Actual state: 0x%02x\n", 527 dev_warn(&lp->spi->dev, "unexcept state change from 0x%02x to 0x%02x. Actual state: 0x%02x\n",
529 ctx->from_state, ctx->to_state, trx_state); 528 ctx->from_state, ctx->to_state, trx_state);
530 } 529 }
@@ -655,7 +654,7 @@ at86rf230_async_state_change_start(void *context)
655 if (ctx->irq_enable) 654 if (ctx->irq_enable)
656 enable_irq(lp->spi->irq); 655 enable_irq(lp->spi->irq);
657 656
658 at86rf230_async_error(lp, &lp->state, rc); 657 at86rf230_async_error(lp, ctx, rc);
659 } 658 }
660} 659}
661 660
@@ -715,10 +714,7 @@ at86rf230_tx_complete(void *context)
715 714
716 enable_irq(lp->spi->irq); 715 enable_irq(lp->spi->irq);
717 716
718 if (lp->max_frame_retries <= 0) 717 ieee802154_xmit_complete(lp->hw, skb, !lp->tx_aret);
719 ieee802154_xmit_complete(lp->hw, skb, true);
720 else
721 ieee802154_xmit_complete(lp->hw, skb, false);
722} 718}
723 719
724static void 720static void
@@ -753,16 +749,13 @@ at86rf230_tx_trac_check(void *context)
753 * to STATE_FORCE_TRX_OFF then STATE_TX_ON to recover the transceiver 749 * to STATE_FORCE_TRX_OFF then STATE_TX_ON to recover the transceiver
754 * state to TX_ON. 750 * state to TX_ON.
755 */ 751 */
756 if (trac) { 752 if (trac)
757 at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF, 753 at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
758 at86rf230_tx_trac_error, true); 754 at86rf230_tx_trac_error, true);
759 return; 755 else
760 } 756 at86rf230_tx_on(context);
761
762 at86rf230_tx_on(context);
763} 757}
764 758
765
766static void 759static void
767at86rf230_tx_trac_status(void *context) 760at86rf230_tx_trac_status(void *context)
768{ 761{
@@ -1082,7 +1075,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
1082 u16 addr = le16_to_cpu(filt->short_addr); 1075 u16 addr = le16_to_cpu(filt->short_addr);
1083 1076
1084 dev_vdbg(&lp->spi->dev, 1077 dev_vdbg(&lp->spi->dev,
1085 "at86rf230_set_hw_addr_filt called for saddr\n"); 1078 "at86rf230_set_hw_addr_filt called for saddr\n");
1086 __at86rf230_write(lp, RG_SHORT_ADDR_0, addr); 1079 __at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
1087 __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8); 1080 __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
1088 } 1081 }
@@ -1091,7 +1084,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
1091 u16 pan = le16_to_cpu(filt->pan_id); 1084 u16 pan = le16_to_cpu(filt->pan_id);
1092 1085
1093 dev_vdbg(&lp->spi->dev, 1086 dev_vdbg(&lp->spi->dev,
1094 "at86rf230_set_hw_addr_filt called for pan id\n"); 1087 "at86rf230_set_hw_addr_filt called for pan id\n");
1095 __at86rf230_write(lp, RG_PAN_ID_0, pan); 1088 __at86rf230_write(lp, RG_PAN_ID_0, pan);
1096 __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8); 1089 __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
1097 } 1090 }
@@ -1101,14 +1094,14 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
1101 1094
1102 memcpy(addr, &filt->ieee_addr, 8); 1095 memcpy(addr, &filt->ieee_addr, 8);
1103 dev_vdbg(&lp->spi->dev, 1096 dev_vdbg(&lp->spi->dev,
1104 "at86rf230_set_hw_addr_filt called for IEEE addr\n"); 1097 "at86rf230_set_hw_addr_filt called for IEEE addr\n");
1105 for (i = 0; i < 8; i++) 1098 for (i = 0; i < 8; i++)
1106 __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]); 1099 __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
1107 } 1100 }
1108 1101
1109 if (changed & IEEE802154_AFILT_PANC_CHANGED) { 1102 if (changed & IEEE802154_AFILT_PANC_CHANGED) {
1110 dev_vdbg(&lp->spi->dev, 1103 dev_vdbg(&lp->spi->dev,
1111 "at86rf230_set_hw_addr_filt called for panc change\n"); 1104 "at86rf230_set_hw_addr_filt called for panc change\n");
1112 if (filt->pan_coord) 1105 if (filt->pan_coord)
1113 at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1); 1106 at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
1114 else 1107 else
@@ -1146,11 +1139,37 @@ at86rf230_set_lbt(struct ieee802154_hw *hw, bool on)
1146} 1139}
1147 1140
1148static int 1141static int
1149at86rf230_set_cca_mode(struct ieee802154_hw *hw, u8 mode) 1142at86rf230_set_cca_mode(struct ieee802154_hw *hw,
1143 const struct wpan_phy_cca *cca)
1150{ 1144{
1151 struct at86rf230_local *lp = hw->priv; 1145 struct at86rf230_local *lp = hw->priv;
1146 u8 val;
1147
1148 /* mapping 802.15.4 to driver spec */
1149 switch (cca->mode) {
1150 case NL802154_CCA_ENERGY:
1151 val = 1;
1152 break;
1153 case NL802154_CCA_CARRIER:
1154 val = 2;
1155 break;
1156 case NL802154_CCA_ENERGY_CARRIER:
1157 switch (cca->opt) {
1158 case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
1159 val = 3;
1160 break;
1161 case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
1162 val = 0;
1163 break;
1164 default:
1165 return -EINVAL;
1166 }
1167 break;
1168 default:
1169 return -EINVAL;
1170 }
1152 1171
1153 return at86rf230_write_subreg(lp, SR_CCA_MODE, mode); 1172 return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
1154} 1173}
1155 1174
1156static int 1175static int
@@ -1400,7 +1419,7 @@ at86rf230_detect_device(struct at86rf230_local *lp)
1400 if (rc) 1419 if (rc)
1401 return rc; 1420 return rc;
1402 1421
1403 rc = __at86rf230_read(lp, RG_PART_NUM, &version); 1422 rc = __at86rf230_read(lp, RG_VERSION_NUM, &version);
1404 if (rc) 1423 if (rc)
1405 return rc; 1424 return rc;
1406 1425
@@ -1410,11 +1429,12 @@ at86rf230_detect_device(struct at86rf230_local *lp)
1410 return -EINVAL; 1429 return -EINVAL;
1411 } 1430 }
1412 1431
1413 lp->hw->extra_tx_headroom = 0;
1414 lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK | 1432 lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK |
1415 IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET | 1433 IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET |
1416 IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS; 1434 IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS;
1417 1435
1436 lp->hw->phy->cca.mode = NL802154_CCA_ENERGY;
1437
1418 switch (part) { 1438 switch (part) {
1419 case 2: 1439 case 2:
1420 chip = "at86rf230"; 1440 chip = "at86rf230";
@@ -1429,16 +1449,12 @@ at86rf230_detect_device(struct at86rf230_local *lp)
1429 break; 1449 break;
1430 case 7: 1450 case 7:
1431 chip = "at86rf212"; 1451 chip = "at86rf212";
1432 if (version == 1) { 1452 lp->data = &at86rf212_data;
1433 lp->data = &at86rf212_data; 1453 lp->hw->flags |= IEEE802154_HW_LBT;
1434 lp->hw->flags |= IEEE802154_HW_LBT; 1454 lp->hw->phy->channels_supported[0] = 0x00007FF;
1435 lp->hw->phy->channels_supported[0] = 0x00007FF; 1455 lp->hw->phy->channels_supported[2] = 0x00007FF;
1436 lp->hw->phy->channels_supported[2] = 0x00007FF; 1456 lp->hw->phy->current_channel = 5;
1437 lp->hw->phy->current_channel = 5; 1457 lp->hw->phy->symbol_duration = 25;
1438 lp->hw->phy->symbol_duration = 25;
1439 } else {
1440 rc = -ENOTSUPP;
1441 }
1442 break; 1458 break;
1443 case 11: 1459 case 11:
1444 chip = "at86rf233"; 1460 chip = "at86rf233";
@@ -1448,7 +1464,7 @@ at86rf230_detect_device(struct at86rf230_local *lp)
1448 lp->hw->phy->symbol_duration = 16; 1464 lp->hw->phy->symbol_duration = 16;
1449 break; 1465 break;
1450 default: 1466 default:
1451 chip = "unkown"; 1467 chip = "unknown";
1452 rc = -ENOTSUPP; 1468 rc = -ENOTSUPP;
1453 break; 1469 break;
1454 } 1470 }
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index f9df9fa86d5f..181b349b060e 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -19,7 +19,6 @@
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/pinctrl/consumer.h>
23#include <linux/of_gpio.h> 22#include <linux/of_gpio.h>
24#include <linux/ieee802154.h> 23#include <linux/ieee802154.h>
25 24
@@ -45,9 +44,9 @@
45#define CC2520_FREG_MASK 0x3F 44#define CC2520_FREG_MASK 0x3F
46 45
47/* status byte values */ 46/* status byte values */
48#define CC2520_STATUS_XOSC32M_STABLE (1 << 7) 47#define CC2520_STATUS_XOSC32M_STABLE BIT(7)
49#define CC2520_STATUS_RSSI_VALID (1 << 6) 48#define CC2520_STATUS_RSSI_VALID BIT(6)
50#define CC2520_STATUS_TX_UNDERFLOW (1 << 3) 49#define CC2520_STATUS_TX_UNDERFLOW BIT(3)
51 50
52/* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */ 51/* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */
53#define CC2520_MINCHANNEL 11 52#define CC2520_MINCHANNEL 11
@@ -513,7 +512,6 @@ err_tx:
513 return rc; 512 return rc;
514} 513}
515 514
516
517static int cc2520_rx(struct cc2520_private *priv) 515static int cc2520_rx(struct cc2520_private *priv)
518{ 516{
519 u8 len = 0, lqi = 0, bytes = 1; 517 u8 len = 0, lqi = 0, bytes = 1;
@@ -551,14 +549,14 @@ cc2520_ed(struct ieee802154_hw *hw, u8 *level)
551 u8 rssi; 549 u8 rssi;
552 int ret; 550 int ret;
553 551
554 ret = cc2520_read_register(priv , CC2520_RSSISTAT, &status); 552 ret = cc2520_read_register(priv, CC2520_RSSISTAT, &status);
555 if (ret) 553 if (ret)
556 return ret; 554 return ret;
557 555
558 if (status != RSSI_VALID) 556 if (status != RSSI_VALID)
559 return -EINVAL; 557 return -EINVAL;
560 558
561 ret = cc2520_read_register(priv , CC2520_RSSI, &rssi); 559 ret = cc2520_read_register(priv, CC2520_RSSI, &rssi);
562 if (ret) 560 if (ret)
563 return ret; 561 return ret;
564 562
@@ -652,6 +650,7 @@ static int cc2520_register(struct cc2520_private *priv)
652 priv->hw->parent = &priv->spi->dev; 650 priv->hw->parent = &priv->spi->dev;
653 priv->hw->extra_tx_headroom = 0; 651 priv->hw->extra_tx_headroom = 0;
654 priv->hw->vif_data_size = sizeof(*priv); 652 priv->hw->vif_data_size = sizeof(*priv);
653 ieee802154_random_extended_addr(&priv->hw->phy->perm_extended_addr);
655 654
656 /* We do support only 2.4 Ghz */ 655 /* We do support only 2.4 Ghz */
657 priv->hw->phy->channels_supported[0] = 0x7FFF800; 656 priv->hw->phy->channels_supported[0] = 0x7FFF800;
@@ -842,24 +841,15 @@ done:
842static int cc2520_probe(struct spi_device *spi) 841static int cc2520_probe(struct spi_device *spi)
843{ 842{
844 struct cc2520_private *priv; 843 struct cc2520_private *priv;
845 struct pinctrl *pinctrl;
846 struct cc2520_platform_data *pdata; 844 struct cc2520_platform_data *pdata;
847 int ret; 845 int ret;
848 846
849 priv = devm_kzalloc(&spi->dev, 847 priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
850 sizeof(struct cc2520_private), GFP_KERNEL); 848 if (!priv)
851 if (!priv) { 849 return -ENOMEM;
852 ret = -ENOMEM;
853 goto err_ret;
854 }
855 850
856 spi_set_drvdata(spi, priv); 851 spi_set_drvdata(spi, priv);
857 852
858 pinctrl = devm_pinctrl_get_select_default(&spi->dev);
859 if (IS_ERR(pinctrl))
860 dev_warn(&spi->dev,
861 "pinctrl pins are not configured\n");
862
863 pdata = cc2520_get_platform_data(spi); 853 pdata = cc2520_get_platform_data(spi);
864 if (!pdata) { 854 if (!pdata) {
865 dev_err(&spi->dev, "no platform data\n"); 855 dev_err(&spi->dev, "no platform data\n");
@@ -870,10 +860,8 @@ static int cc2520_probe(struct spi_device *spi)
870 860
871 priv->buf = devm_kzalloc(&spi->dev, 861 priv->buf = devm_kzalloc(&spi->dev,
872 SPI_COMMAND_BUFFER, GFP_KERNEL); 862 SPI_COMMAND_BUFFER, GFP_KERNEL);
873 if (!priv->buf) { 863 if (!priv->buf)
874 ret = -ENOMEM; 864 return -ENOMEM;
875 goto err_ret;
876 }
877 865
878 mutex_init(&priv->buffer_mutex); 866 mutex_init(&priv->buffer_mutex);
879 INIT_WORK(&priv->fifop_irqwork, cc2520_fifop_irqwork); 867 INIT_WORK(&priv->fifop_irqwork, cc2520_fifop_irqwork);
@@ -947,7 +935,6 @@ static int cc2520_probe(struct spi_device *spi)
947 if (ret) 935 if (ret)
948 goto err_hw_init; 936 goto err_hw_init;
949 937
950
951 gpio_set_value(pdata->vreg, HIGH); 938 gpio_set_value(pdata->vreg, HIGH);
952 usleep_range(100, 150); 939 usleep_range(100, 150);
953 940
@@ -991,8 +978,6 @@ static int cc2520_probe(struct spi_device *spi)
991err_hw_init: 978err_hw_init:
992 mutex_destroy(&priv->buffer_mutex); 979 mutex_destroy(&priv->buffer_mutex);
993 flush_work(&priv->fifop_irqwork); 980 flush_work(&priv->fifop_irqwork);
994
995err_ret:
996 return ret; 981 return ret;
997} 982}
998 983
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index a200fa16beae..fba2dfd910f7 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -289,7 +289,7 @@ static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
289 goto out; 289 goto out;
290 290
291 /* Range check the RX FIFO length, accounting for the one-byte 291 /* Range check the RX FIFO length, accounting for the one-byte
292 * length field at the begining. */ 292 * length field at the beginning. */
293 if (rx_len > RX_FIFO_SIZE-1) { 293 if (rx_len > RX_FIFO_SIZE-1) {
294 dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n"); 294 dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n");
295 rx_len = RX_FIFO_SIZE-1; 295 rx_len = RX_FIFO_SIZE-1;
@@ -323,7 +323,7 @@ static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
323 323
324#ifdef DEBUG 324#ifdef DEBUG
325 print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ", 325 print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ",
326 DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0); 326 DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0);
327 pr_debug("mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n", 327 pr_debug("mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n",
328 lqi_rssi[0], lqi_rssi[1]); 328 lqi_rssi[0], lqi_rssi[1]);
329#endif 329#endif
@@ -521,7 +521,7 @@ static int mrf24j40_filter(struct ieee802154_hw *hw,
521 */ 521 */
522 522
523 dev_dbg(printdev(devrec), "Set Pan Coord to %s\n", 523 dev_dbg(printdev(devrec), "Set Pan Coord to %s\n",
524 filt->pan_coord ? "on" : "off"); 524 filt->pan_coord ? "on" : "off");
525 } 525 }
526 526
527 return 0; 527 return 0;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 2e195289ddf4..2a175006028b 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -9,7 +9,7 @@
9 9
10#include "ipvlan.h" 10#include "ipvlan.h"
11 11
12static u32 ipvlan_jhash_secret; 12static u32 ipvlan_jhash_secret __read_mostly;
13 13
14void ipvlan_init_secret(void) 14void ipvlan_init_secret(void)
15{ 15{
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 58f98f4de773..58ae11a14bb6 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1462,17 +1462,12 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
1462 if (mtt) 1462 if (mtt)
1463 { 1463 {
1464 /* Check how much time we have used already */ 1464 /* Check how much time we have used already */
1465 do_gettimeofday(&self->now); 1465 diff = ktime_us_delta(ktime_get(), self->stamp);
1466
1467 diff = self->now.tv_usec - self->stamp.tv_usec;
1468 /* self->stamp is set from ali_ircc_dma_receive_complete() */ 1466 /* self->stamp is set from ali_ircc_dma_receive_complete() */
1469 1467
1470 pr_debug("%s(), ******* diff = %d *******\n", 1468 pr_debug("%s(), ******* diff = %d *******\n",
1471 __func__, diff); 1469 __func__, diff);
1472 1470
1473 if (diff < 0)
1474 diff += 1000000;
1475
1476 /* Check if the mtt is larger than the time we have 1471 /* Check if the mtt is larger than the time we have
1477 * already used by all the protocol processing 1472 * already used by all the protocol processing
1478 */ 1473 */
@@ -1884,7 +1879,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1884 * reduce the min turn time a bit since we will know 1879 * reduce the min turn time a bit since we will know
1885 * how much time we have used for protocol processing 1880 * how much time we have used for protocol processing
1886 */ 1881 */
1887 do_gettimeofday(&self->stamp); 1882 self->stamp = ktime_get();
1888 1883
1889 skb = dev_alloc_skb(len+1); 1884 skb = dev_alloc_skb(len+1);
1890 if (skb == NULL) 1885 if (skb == NULL)
diff --git a/drivers/net/irda/ali-ircc.h b/drivers/net/irda/ali-ircc.h
index 0c8edb41bd0a..c2d9747a5108 100644
--- a/drivers/net/irda/ali-ircc.h
+++ b/drivers/net/irda/ali-ircc.h
@@ -22,7 +22,7 @@
22#ifndef ALI_IRCC_H 22#ifndef ALI_IRCC_H
23#define ALI_IRCC_H 23#define ALI_IRCC_H
24 24
25#include <linux/time.h> 25#include <linux/ktime.h>
26 26
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/pm.h> 28#include <linux/pm.h>
@@ -209,8 +209,7 @@ struct ali_ircc_cb {
209 209
210 unsigned char rcvFramesOverflow; 210 unsigned char rcvFramesOverflow;
211 211
212 struct timeval stamp; 212 ktime_t stamp;
213 struct timeval now;
214 213
215 spinlock_t lock; /* For serializing operations */ 214 spinlock_t lock; /* For serializing operations */
216 215
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index e151205281e2..44e4f386a5dc 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -24,7 +24,6 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/time.h>
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/ioport.h> 28#include <linux/ioport.h>
30 29
@@ -163,8 +162,6 @@ struct au1k_private {
163 iobuff_t rx_buff; 162 iobuff_t rx_buff;
164 163
165 struct net_device *netdev; 164 struct net_device *netdev;
166 struct timeval stamp;
167 struct timeval now;
168 struct qos_info qos; 165 struct qos_info qos;
169 struct irlap_cb *irlap; 166 struct irlap_cb *irlap;
170 167
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 48b2f9a321b7..f6c916312577 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -495,18 +495,12 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
495 mtt = irda_get_mtt(skb); 495 mtt = irda_get_mtt(skb);
496 if (mtt) { 496 if (mtt) {
497 int diff; 497 int diff;
498 do_gettimeofday(&self->now); 498 diff = ktime_us_delta(ktime_get(), self->stamp);
499 diff = self->now.tv_usec - self->stamp.tv_usec;
500#ifdef IU_USB_MIN_RTT 499#ifdef IU_USB_MIN_RTT
501 /* Factor in USB delays -> Get rid of udelay() that 500 /* Factor in USB delays -> Get rid of udelay() that
502 * would be lost in the noise - Jean II */ 501 * would be lost in the noise - Jean II */
503 diff += IU_USB_MIN_RTT; 502 diff += IU_USB_MIN_RTT;
504#endif /* IU_USB_MIN_RTT */ 503#endif /* IU_USB_MIN_RTT */
505 /* If the usec counter did wraparound, the diff will
506 * go negative (tv_usec is a long), so we need to
507 * correct it by one second. Jean II */
508 if (diff < 0)
509 diff += 1000000;
510 504
511 /* Check if the mtt is larger than the time we have 505 /* Check if the mtt is larger than the time we have
512 * already used by all the protocol processing 506 * already used by all the protocol processing
@@ -869,7 +863,7 @@ static void irda_usb_receive(struct urb *urb)
869 * reduce the min turn time a bit since we will know 863 * reduce the min turn time a bit since we will know
870 * how much time we have used for protocol processing 864 * how much time we have used for protocol processing
871 */ 865 */
872 do_gettimeofday(&self->stamp); 866 self->stamp = ktime_get();
873 867
874 /* Check if we need to copy the data to a new skb or not. 868 /* Check if we need to copy the data to a new skb or not.
875 * For most frames, we use ZeroCopy and pass the already 869 * For most frames, we use ZeroCopy and pass the already
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
index 58ddb5214916..8ac389fa9348 100644
--- a/drivers/net/irda/irda-usb.h
+++ b/drivers/net/irda/irda-usb.h
@@ -29,7 +29,7 @@
29 * 29 *
30 *****************************************************************************/ 30 *****************************************************************************/
31 31
32#include <linux/time.h> 32#include <linux/ktime.h>
33 33
34#include <net/irda/irda.h> 34#include <net/irda/irda.h>
35#include <net/irda/irda_device.h> /* struct irlap_cb */ 35#include <net/irda/irda_device.h> /* struct irlap_cb */
@@ -157,8 +157,7 @@ struct irda_usb_cb {
157 char *speed_buff; /* Buffer for speed changes */ 157 char *speed_buff; /* Buffer for speed changes */
158 char *tx_buff; 158 char *tx_buff;
159 159
160 struct timeval stamp; 160 ktime_t stamp;
161 struct timeval now;
162 161
163 spinlock_t lock; /* For serializing Tx operations */ 162 spinlock_t lock; /* For serializing Tx operations */
164 163
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index e638893e98a9..fb5d162ec7d2 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -114,7 +114,6 @@ struct kingsun_cb {
114 (usually 8) */ 114 (usually 8) */
115 115
116 iobuff_t rx_buff; /* receive unwrap state machine */ 116 iobuff_t rx_buff; /* receive unwrap state machine */
117 struct timeval rx_time;
118 spinlock_t lock; 117 spinlock_t lock;
119 int receiving; 118 int receiving;
120 119
@@ -235,7 +234,6 @@ static void kingsun_rcv_irq(struct urb *urb)
235 &kingsun->netdev->stats, 234 &kingsun->netdev->stats,
236 &kingsun->rx_buff, bytes[i]); 235 &kingsun->rx_buff, bytes[i]);
237 } 236 }
238 do_gettimeofday(&kingsun->rx_time);
239 kingsun->receiving = 237 kingsun->receiving =
240 (kingsun->rx_buff.state != OUTSIDE_FRAME) 238 (kingsun->rx_buff.state != OUTSIDE_FRAME)
241 ? 1 : 0; 239 ? 1 : 0;
@@ -273,7 +271,6 @@ static int kingsun_net_open(struct net_device *netdev)
273 271
274 skb_reserve(kingsun->rx_buff.skb, 1); 272 skb_reserve(kingsun->rx_buff.skb, 1);
275 kingsun->rx_buff.head = kingsun->rx_buff.skb->data; 273 kingsun->rx_buff.head = kingsun->rx_buff.skb->data;
276 do_gettimeofday(&kingsun->rx_time);
277 274
278 kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL); 275 kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
279 if (!kingsun->rx_urb) 276 if (!kingsun->rx_urb)
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index e6b3804edacd..8e6e0edf2440 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -187,7 +187,6 @@ struct ks959_cb {
187 __u8 *rx_buf; 187 __u8 *rx_buf;
188 __u8 rx_variable_xormask; 188 __u8 rx_variable_xormask;
189 iobuff_t rx_unwrap_buff; 189 iobuff_t rx_unwrap_buff;
190 struct timeval rx_time;
191 190
192 struct usb_ctrlrequest *speed_setuprequest; 191 struct usb_ctrlrequest *speed_setuprequest;
193 struct urb *speed_urb; 192 struct urb *speed_urb;
@@ -476,7 +475,6 @@ static void ks959_rcv_irq(struct urb *urb)
476 bytes[i]); 475 bytes[i]);
477 } 476 }
478 } 477 }
479 do_gettimeofday(&kingsun->rx_time);
480 kingsun->receiving = 478 kingsun->receiving =
481 (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0; 479 (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0;
482 } 480 }
@@ -514,7 +512,6 @@ static int ks959_net_open(struct net_device *netdev)
514 512
515 skb_reserve(kingsun->rx_unwrap_buff.skb, 1); 513 skb_reserve(kingsun->rx_unwrap_buff.skb, 1);
516 kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data; 514 kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data;
517 do_gettimeofday(&kingsun->rx_time);
518 515
519 kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL); 516 kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
520 if (!kingsun->rx_urb) 517 if (!kingsun->rx_urb)
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index e4d678fbeb2f..bca6a1e72d1d 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -722,7 +722,6 @@ static int mcs_net_open(struct net_device *netdev)
722 722
723 skb_reserve(mcs->rx_buff.skb, 1); 723 skb_reserve(mcs->rx_buff.skb, 1);
724 mcs->rx_buff.head = mcs->rx_buff.skb->data; 724 mcs->rx_buff.head = mcs->rx_buff.skb->data;
725 do_gettimeofday(&mcs->rx_time);
726 725
727 /* 726 /*
728 * Now that everything should be initialized properly, 727 * Now that everything should be initialized properly,
@@ -799,7 +798,6 @@ static void mcs_receive_irq(struct urb *urb)
799 mcs_unwrap_fir(mcs, urb->transfer_buffer, 798 mcs_unwrap_fir(mcs, urb->transfer_buffer,
800 urb->actual_length); 799 urb->actual_length);
801 } 800 }
802 do_gettimeofday(&mcs->rx_time);
803 } 801 }
804 802
805 ret = usb_submit_urb(urb, GFP_ATOMIC); 803 ret = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/net/irda/mcs7780.h b/drivers/net/irda/mcs7780.h
index b10689b2887c..a6e8f7dbafc9 100644
--- a/drivers/net/irda/mcs7780.h
+++ b/drivers/net/irda/mcs7780.h
@@ -116,7 +116,6 @@ struct mcs_cb {
116 __u8 *fifo_status; 116 __u8 *fifo_status;
117 117
118 iobuff_t rx_buff; /* receive unwrap state machine */ 118 iobuff_t rx_buff; /* receive unwrap state machine */
119 struct timeval rx_time;
120 spinlock_t lock; 119 spinlock_t lock;
121 int receiving; 120 int receiving;
122 121
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index e7317b104bfb..dc0dbd8dd0b5 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1501,10 +1501,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
1501 mtt = irda_get_mtt(skb); 1501 mtt = irda_get_mtt(skb);
1502 if (mtt) { 1502 if (mtt) {
1503 /* Check how much time we have used already */ 1503 /* Check how much time we have used already */
1504 do_gettimeofday(&self->now); 1504 diff = ktime_us_delta(ktime_get(), self->stamp);
1505 diff = self->now.tv_usec - self->stamp.tv_usec;
1506 if (diff < 0)
1507 diff += 1000000;
1508 1505
1509 /* Check if the mtt is larger than the time we have 1506 /* Check if the mtt is larger than the time we have
1510 * already used by all the protocol processing 1507 * already used by all the protocol processing
@@ -1867,7 +1864,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
1867 * reduce the min turn time a bit since we will know 1864 * reduce the min turn time a bit since we will know
1868 * how much time we have used for protocol processing 1865 * how much time we have used for protocol processing
1869 */ 1866 */
1870 do_gettimeofday(&self->stamp); 1867 self->stamp = ktime_get();
1871 1868
1872 skb = dev_alloc_skb(len+1); 1869 skb = dev_alloc_skb(len+1);
1873 if (skb == NULL) { 1870 if (skb == NULL) {
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h
index 32fa58211fad..7be5acb56532 100644
--- a/drivers/net/irda/nsc-ircc.h
+++ b/drivers/net/irda/nsc-ircc.h
@@ -28,7 +28,7 @@
28#ifndef NSC_IRCC_H 28#ifndef NSC_IRCC_H
29#define NSC_IRCC_H 29#define NSC_IRCC_H
30 30
31#include <linux/time.h> 31#include <linux/ktime.h>
32 32
33#include <linux/spinlock.h> 33#include <linux/spinlock.h>
34#include <linux/pm.h> 34#include <linux/pm.h>
@@ -263,8 +263,7 @@ struct nsc_ircc_cb {
263 263
264 __u8 ier; /* Interrupt enable register */ 264 __u8 ier; /* Interrupt enable register */
265 265
266 struct timeval stamp; 266 ktime_t stamp;
267 struct timeval now;
268 267
269 spinlock_t lock; /* For serializing operations */ 268 spinlock_t lock; /* For serializing operations */
270 269
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 7b17fa2114e1..b6e44ff4e373 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -38,7 +38,7 @@
38#include <net/irda/irda_device.h> 38#include <net/irda/irda_device.h>
39 39
40#include <mach/hardware.h> 40#include <mach/hardware.h>
41#include <asm/mach/irda.h> 41#include <linux/platform_data/irda-sa11x0.h>
42 42
43static int power_level = 3; 43static int power_level = 3;
44static int tx_lpm; 44static int tx_lpm;
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index dd1bd1060ec9..83cc48a01802 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -40,6 +40,7 @@
40#include <linux/moduleparam.h> 40#include <linux/moduleparam.h>
41 41
42#include <linux/kernel.h> 42#include <linux/kernel.h>
43#include <linux/ktime.h>
43#include <linux/types.h> 44#include <linux/types.h>
44#include <linux/time.h> 45#include <linux/time.h>
45#include <linux/skbuff.h> 46#include <linux/skbuff.h>
@@ -174,7 +175,7 @@ struct stir_cb {
174 __u8 *fifo_status; 175 __u8 *fifo_status;
175 176
176 iobuff_t rx_buff; /* receive unwrap state machine */ 177 iobuff_t rx_buff; /* receive unwrap state machine */
177 struct timeval rx_time; 178 ktime_t rx_time;
178 int receiving; 179 int receiving;
179 struct urb *rx_urb; 180 struct urb *rx_urb;
180}; 181};
@@ -650,15 +651,12 @@ static int fifo_txwait(struct stir_cb *stir, int space)
650static void turnaround_delay(const struct stir_cb *stir, long us) 651static void turnaround_delay(const struct stir_cb *stir, long us)
651{ 652{
652 long ticks; 653 long ticks;
653 struct timeval now;
654 654
655 if (us <= 0) 655 if (us <= 0)
656 return; 656 return;
657 657
658 do_gettimeofday(&now); 658 us -= ktime_us_delta(ktime_get(), stir->rx_time);
659 if (now.tv_sec - stir->rx_time.tv_sec > 0) 659
660 us -= USEC_PER_SEC;
661 us -= now.tv_usec - stir->rx_time.tv_usec;
662 if (us < 10) 660 if (us < 10)
663 return; 661 return;
664 662
@@ -823,8 +821,8 @@ static void stir_rcv_irq(struct urb *urb)
823 pr_debug("receive %d\n", urb->actual_length); 821 pr_debug("receive %d\n", urb->actual_length);
824 unwrap_chars(stir, urb->transfer_buffer, 822 unwrap_chars(stir, urb->transfer_buffer,
825 urb->actual_length); 823 urb->actual_length);
826 824
827 do_gettimeofday(&stir->rx_time); 825 stir->rx_time = ktime_get();
828 } 826 }
829 827
830 /* kernel thread is stopping receiver don't resubmit */ 828 /* kernel thread is stopping receiver don't resubmit */
@@ -876,7 +874,7 @@ static int stir_net_open(struct net_device *netdev)
876 874
877 skb_reserve(stir->rx_buff.skb, 1); 875 skb_reserve(stir->rx_buff.skb, 1);
878 stir->rx_buff.head = stir->rx_buff.skb->data; 876 stir->rx_buff.head = stir->rx_buff.skb->data;
879 do_gettimeofday(&stir->rx_time); 877 stir->rx_time = ktime_get();
880 878
881 stir->rx_urb = usb_alloc_urb(0, GFP_KERNEL); 879 stir->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
882 if (!stir->rx_urb) 880 if (!stir->rx_urb)
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index 7ce820ecc361..ac1525573398 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -29,7 +29,6 @@ this program; if not, see <http://www.gnu.org/licenses/>.
29 ********************************************************************/ 29 ********************************************************************/
30#ifndef via_IRCC_H 30#ifndef via_IRCC_H
31#define via_IRCC_H 31#define via_IRCC_H
32#include <linux/time.h>
33#include <linux/spinlock.h> 32#include <linux/spinlock.h>
34#include <linux/pm.h> 33#include <linux/pm.h>
35#include <linux/types.h> 34#include <linux/types.h>
@@ -106,9 +105,6 @@ struct via_ircc_cb {
106 105
107 __u8 ier; /* Interrupt enable register */ 106 __u8 ier; /* Interrupt enable register */
108 107
109 struct timeval stamp;
110 struct timeval now;
111
112 spinlock_t lock; /* For serializing operations */ 108 spinlock_t lock; /* For serializing operations */
113 109
114 __u32 flags; /* Interface flags */ 110 __u32 flags; /* Interface flags */
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index ac39d9f33d5f..a0849f49bbec 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -33,6 +33,7 @@ MODULE_LICENSE("GPL");
33/********************************************************/ 33/********************************************************/
34 34
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/ktime.h>
36#include <linux/init.h> 37#include <linux/init.h>
37#include <linux/interrupt.h> 38#include <linux/interrupt.h>
38#include <linux/pci.h> 39#include <linux/pci.h>
@@ -40,9 +41,9 @@ MODULE_LICENSE("GPL");
40#include <linux/netdevice.h> 41#include <linux/netdevice.h>
41#include <linux/skbuff.h> 42#include <linux/skbuff.h>
42#include <linux/delay.h> 43#include <linux/delay.h>
43#include <linux/time.h>
44#include <linux/proc_fs.h> 44#include <linux/proc_fs.h>
45#include <linux/seq_file.h> 45#include <linux/seq_file.h>
46#include <linux/math64.h>
46#include <linux/mutex.h> 47#include <linux/mutex.h>
47#include <asm/uaccess.h> 48#include <asm/uaccess.h>
48#include <asm/byteorder.h> 49#include <asm/byteorder.h>
@@ -180,8 +181,7 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
180 vlsi_irda_dev_t *idev = netdev_priv(ndev); 181 vlsi_irda_dev_t *idev = netdev_priv(ndev);
181 u8 byte; 182 u8 byte;
182 u16 word; 183 u16 word;
183 unsigned delta1, delta2; 184 s32 sec, usec;
184 struct timeval now;
185 unsigned iobase = ndev->base_addr; 185 unsigned iobase = ndev->base_addr;
186 186
187 seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name, 187 seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
@@ -277,17 +277,9 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
277 seq_printf(seq, "\nsw-state:\n"); 277 seq_printf(seq, "\nsw-state:\n");
278 seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud, 278 seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud,
279 (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR")); 279 (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
280 do_gettimeofday(&now); 280 sec = div_s64_rem(ktime_us_delta(ktime_get(), idev->last_rx),
281 if (now.tv_usec >= idev->last_rx.tv_usec) { 281 USEC_PER_SEC, &usec);
282 delta2 = now.tv_usec - idev->last_rx.tv_usec; 282 seq_printf(seq, "last rx: %ul.%06u sec\n", sec, usec);
283 delta1 = 0;
284 }
285 else {
286 delta2 = 1000000 + now.tv_usec - idev->last_rx.tv_usec;
287 delta1 = 1;
288 }
289 seq_printf(seq, "last rx: %lu.%06u sec\n",
290 now.tv_sec - idev->last_rx.tv_sec - delta1, delta2);
291 283
292 seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu", 284 seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
293 ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors, 285 ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors,
@@ -661,7 +653,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev)
661 } 653 }
662 } 654 }
663 655
664 do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */ 656 idev->last_rx = ktime_get(); /* remember "now" for later mtt delay */
665 657
666 vlsi_fill_rx(r); 658 vlsi_fill_rx(r);
667 659
@@ -858,9 +850,8 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
858 unsigned iobase = ndev->base_addr; 850 unsigned iobase = ndev->base_addr;
859 u8 status; 851 u8 status;
860 u16 config; 852 u16 config;
861 int mtt; 853 int mtt, diff;
862 int len, speed; 854 int len, speed;
863 struct timeval now, ready;
864 char *msg = NULL; 855 char *msg = NULL;
865 856
866 speed = irda_get_next_speed(skb); 857 speed = irda_get_next_speed(skb);
@@ -940,21 +931,10 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
940 spin_unlock_irqrestore(&idev->lock, flags); 931 spin_unlock_irqrestore(&idev->lock, flags);
941 932
942 if ((mtt = irda_get_mtt(skb)) > 0) { 933 if ((mtt = irda_get_mtt(skb)) > 0) {
943 934 diff = ktime_us_delta(ktime_get(), idev->last_rx);
944 ready.tv_usec = idev->last_rx.tv_usec + mtt; 935 if (mtt > diff)
945 ready.tv_sec = idev->last_rx.tv_sec; 936 udelay(mtt - diff);
946 if (ready.tv_usec >= 1000000) {
947 ready.tv_usec -= 1000000;
948 ready.tv_sec++; /* IrLAP 1.1: mtt always < 1 sec */
949 }
950 for(;;) {
951 do_gettimeofday(&now);
952 if (now.tv_sec > ready.tv_sec ||
953 (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
954 break;
955 udelay(100);
956 /* must not sleep here - called under netif_tx_lock! */ 937 /* must not sleep here - called under netif_tx_lock! */
957 }
958 } 938 }
959 939
960 /* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu() 940 /* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu()
@@ -1333,7 +1313,7 @@ static int vlsi_start_hw(vlsi_irda_dev_t *idev)
1333 1313
1334 vlsi_fill_rx(idev->rx_ring); 1314 vlsi_fill_rx(idev->rx_ring);
1335 1315
1336 do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */ 1316 idev->last_rx = ktime_get(); /* first mtt may start from now on */
1337 1317
1338 outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */ 1318 outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */
1339 1319
@@ -1520,7 +1500,7 @@ static int vlsi_open(struct net_device *ndev)
1520 if (!idev->irlap) 1500 if (!idev->irlap)
1521 goto errout_free_ring; 1501 goto errout_free_ring;
1522 1502
1523 do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */ 1503 idev->last_rx = ktime_get(); /* first mtt may start from now on */
1524 1504
1525 idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */ 1505 idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */
1526 1506
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index f9119c6d2a09..f9db2ce4c5c6 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -723,7 +723,7 @@ typedef struct vlsi_irda_dev {
723 void *virtaddr; 723 void *virtaddr;
724 struct vlsi_ring *tx_ring, *rx_ring; 724 struct vlsi_ring *tx_ring, *rx_ring;
725 725
726 struct timeval last_rx; 726 ktime_t last_rx;
727 727
728 spinlock_t lock; 728 spinlock_t lock;
729 struct mutex mtx; 729 struct mutex mtx;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 612e0731142d..1df38bdae2ee 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1471,11 +1471,17 @@ int macvlan_link_register(struct rtnl_link_ops *ops)
1471}; 1471};
1472EXPORT_SYMBOL_GPL(macvlan_link_register); 1472EXPORT_SYMBOL_GPL(macvlan_link_register);
1473 1473
1474static struct net *macvlan_get_link_net(const struct net_device *dev)
1475{
1476 return dev_net(macvlan_dev_real_dev(dev));
1477}
1478
1474static struct rtnl_link_ops macvlan_link_ops = { 1479static struct rtnl_link_ops macvlan_link_ops = {
1475 .kind = "macvlan", 1480 .kind = "macvlan",
1476 .setup = macvlan_setup, 1481 .setup = macvlan_setup,
1477 .newlink = macvlan_newlink, 1482 .newlink = macvlan_newlink,
1478 .dellink = macvlan_dellink, 1483 .dellink = macvlan_dellink,
1484 .get_link_net = macvlan_get_link_net,
1479}; 1485};
1480 1486
1481static int macvlan_device_event(struct notifier_block *unused, 1487static int macvlan_device_event(struct notifier_block *unused,
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 919f4fccc322..e40fdfccc9c1 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -642,7 +642,7 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
642 642
643 if (skb->ip_summed == CHECKSUM_PARTIAL) { 643 if (skb->ip_summed == CHECKSUM_PARTIAL) {
644 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 644 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
645 if (vlan_tx_tag_present(skb)) 645 if (skb_vlan_tag_present(skb))
646 vnet_hdr->csum_start = cpu_to_macvtap16(q, 646 vnet_hdr->csum_start = cpu_to_macvtap16(q,
647 skb_checksum_start_offset(skb) + VLAN_HLEN); 647 skb_checksum_start_offset(skb) + VLAN_HLEN);
648 else 648 else
@@ -818,13 +818,13 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
818 total = vnet_hdr_len; 818 total = vnet_hdr_len;
819 total += skb->len; 819 total += skb->len;
820 820
821 if (vlan_tx_tag_present(skb)) { 821 if (skb_vlan_tag_present(skb)) {
822 struct { 822 struct {
823 __be16 h_vlan_proto; 823 __be16 h_vlan_proto;
824 __be16 h_vlan_TCI; 824 __be16 h_vlan_TCI;
825 } veth; 825 } veth;
826 veth.h_vlan_proto = skb->vlan_proto; 826 veth.h_vlan_proto = skb->vlan_proto;
827 veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); 827 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
828 828
829 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 829 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
830 total += VLAN_HLEN; 830 total += VLAN_HLEN;
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 4a99c3919037..993570b1e2ae 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -302,7 +302,7 @@ void mii_check_link (struct mii_if_info *mii)
302} 302}
303 303
304/** 304/**
305 * mii_check_media - check the MII interface for a duplex change 305 * mii_check_media - check the MII interface for a carrier/speed/duplex change
306 * @mii: the MII interface 306 * @mii: the MII interface
307 * @ok_to_print: OK to print link up/down messages 307 * @ok_to_print: OK to print link up/down messages
308 * @init_media: OK to save duplex mode in @mii 308 * @init_media: OK to save duplex mode in @mii
@@ -318,10 +318,6 @@ unsigned int mii_check_media (struct mii_if_info *mii,
318 int advertise, lpa, media, duplex; 318 int advertise, lpa, media, duplex;
319 int lpa2 = 0; 319 int lpa2 = 0;
320 320
321 /* if forced media, go no further */
322 if (mii->force_media)
323 return 0; /* duplex did not change */
324
325 /* check current and old link status */ 321 /* check current and old link status */
326 old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0; 322 old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0;
327 new_carrier = (unsigned int) mii_link_ok(mii); 323 new_carrier = (unsigned int) mii_link_ok(mii);
@@ -345,6 +341,12 @@ unsigned int mii_check_media (struct mii_if_info *mii,
345 */ 341 */
346 netif_carrier_on(mii->dev); 342 netif_carrier_on(mii->dev);
347 343
344 if (mii->force_media) {
345 if (ok_to_print)
346 netdev_info(mii->dev, "link up\n");
347 return 0; /* duplex did not change */
348 }
349
348 /* get MII advertise and LPA values */ 350 /* get MII advertise and LPA values */
349 if ((!init_media) && (mii->advertising)) 351 if ((!init_media) && (mii->advertising))
350 advertise = mii->advertising; 352 advertise = mii->advertising;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a3c251b79f38..16adbc481772 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -26,7 +26,7 @@ config AMD_PHY
26 26
27config AMD_XGBE_PHY 27config AMD_XGBE_PHY
28 tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs" 28 tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
29 depends on OF && HAS_IOMEM 29 depends on (OF || ACPI) && HAS_IOMEM
30 ---help--- 30 ---help---
31 Currently supports the AMD 10GbE PHY 31 Currently supports the AMD 10GbE PHY
32 32
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index 903dc3dc9ea7..9e3af54c9010 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -60,6 +60,7 @@
60#include <linux/interrupt.h> 60#include <linux/interrupt.h>
61#include <linux/init.h> 61#include <linux/init.h>
62#include <linux/delay.h> 62#include <linux/delay.h>
63#include <linux/workqueue.h>
63#include <linux/netdevice.h> 64#include <linux/netdevice.h>
64#include <linux/etherdevice.h> 65#include <linux/etherdevice.h>
65#include <linux/skbuff.h> 66#include <linux/skbuff.h>
@@ -74,6 +75,9 @@
74#include <linux/of_platform.h> 75#include <linux/of_platform.h>
75#include <linux/of_device.h> 76#include <linux/of_device.h>
76#include <linux/uaccess.h> 77#include <linux/uaccess.h>
78#include <linux/bitops.h>
79#include <linux/property.h>
80#include <linux/acpi.h>
77 81
78MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); 82MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
79MODULE_LICENSE("Dual BSD/GPL"); 83MODULE_LICENSE("Dual BSD/GPL");
@@ -84,22 +88,43 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
84#define XGBE_PHY_MASK 0xfffffff0 88#define XGBE_PHY_MASK 0xfffffff0
85 89
86#define XGBE_PHY_SPEEDSET_PROPERTY "amd,speed-set" 90#define XGBE_PHY_SPEEDSET_PROPERTY "amd,speed-set"
91#define XGBE_PHY_BLWC_PROPERTY "amd,serdes-blwc"
92#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
93#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
94#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
95
96#define XGBE_PHY_SPEEDS 3
97#define XGBE_PHY_SPEED_1000 0
98#define XGBE_PHY_SPEED_2500 1
99#define XGBE_PHY_SPEED_10000 2
87 100
88#define XGBE_AN_INT_CMPLT 0x01 101#define XGBE_AN_INT_CMPLT 0x01
89#define XGBE_AN_INC_LINK 0x02 102#define XGBE_AN_INC_LINK 0x02
90#define XGBE_AN_PG_RCV 0x04 103#define XGBE_AN_PG_RCV 0x04
104#define XGBE_AN_INT_MASK 0x07
91 105
92#define XNP_MCF_NULL_MESSAGE 0x001 106#define XNP_MCF_NULL_MESSAGE 0x001
93#define XNP_ACK_PROCESSED (1 << 12) 107#define XNP_ACK_PROCESSED BIT(12)
94#define XNP_MP_FORMATTED (1 << 13) 108#define XNP_MP_FORMATTED BIT(13)
95#define XNP_NP_EXCHANGE (1 << 15) 109#define XNP_NP_EXCHANGE BIT(15)
96 110
97#define XGBE_PHY_RATECHANGE_COUNT 500 111#define XGBE_PHY_RATECHANGE_COUNT 500
98 112
113#define XGBE_PHY_KR_TRAINING_START 0x01
114#define XGBE_PHY_KR_TRAINING_ENABLE 0x02
115
116#define XGBE_PHY_FEC_ENABLE 0x01
117#define XGBE_PHY_FEC_FORWARD 0x02
118#define XGBE_PHY_FEC_MASK 0x03
119
99#ifndef MDIO_PMA_10GBR_PMD_CTRL 120#ifndef MDIO_PMA_10GBR_PMD_CTRL
100#define MDIO_PMA_10GBR_PMD_CTRL 0x0096 121#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
101#endif 122#endif
102 123
124#ifndef MDIO_PMA_10GBR_FEC_ABILITY
125#define MDIO_PMA_10GBR_FEC_ABILITY 0x00aa
126#endif
127
103#ifndef MDIO_PMA_10GBR_FEC_CTRL 128#ifndef MDIO_PMA_10GBR_FEC_CTRL
104#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab 129#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
105#endif 130#endif
@@ -108,6 +133,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
108#define MDIO_AN_XNP 0x0016 133#define MDIO_AN_XNP 0x0016
109#endif 134#endif
110 135
136#ifndef MDIO_AN_LPX
137#define MDIO_AN_LPX 0x0019
138#endif
139
111#ifndef MDIO_AN_INTMASK 140#ifndef MDIO_AN_INTMASK
112#define MDIO_AN_INTMASK 0x8001 141#define MDIO_AN_INTMASK 0x8001
113#endif 142#endif
@@ -116,18 +145,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
116#define MDIO_AN_INT 0x8002 145#define MDIO_AN_INT 0x8002
117#endif 146#endif
118 147
119#ifndef MDIO_AN_KR_CTRL
120#define MDIO_AN_KR_CTRL 0x8003
121#endif
122
123#ifndef MDIO_CTRL1_SPEED1G 148#ifndef MDIO_CTRL1_SPEED1G
124#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) 149#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
125#endif 150#endif
126 151
127#ifndef MDIO_KR_CTRL_PDETECT
128#define MDIO_KR_CTRL_PDETECT 0x01
129#endif
130
131/* SerDes integration register offsets */ 152/* SerDes integration register offsets */
132#define SIR0_KR_RT_1 0x002c 153#define SIR0_KR_RT_1 0x002c
133#define SIR0_STATUS 0x0040 154#define SIR0_STATUS 0x0040
@@ -140,10 +161,10 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
140#define SIR0_STATUS_RX_READY_WIDTH 1 161#define SIR0_STATUS_RX_READY_WIDTH 1
141#define SIR0_STATUS_TX_READY_INDEX 8 162#define SIR0_STATUS_TX_READY_INDEX 8
142#define SIR0_STATUS_TX_READY_WIDTH 1 163#define SIR0_STATUS_TX_READY_WIDTH 1
164#define SIR1_SPEED_CDR_RATE_INDEX 12
165#define SIR1_SPEED_CDR_RATE_WIDTH 4
143#define SIR1_SPEED_DATARATE_INDEX 4 166#define SIR1_SPEED_DATARATE_INDEX 4
144#define SIR1_SPEED_DATARATE_WIDTH 2 167#define SIR1_SPEED_DATARATE_WIDTH 2
145#define SIR1_SPEED_PI_SPD_SEL_INDEX 12
146#define SIR1_SPEED_PI_SPD_SEL_WIDTH 4
147#define SIR1_SPEED_PLLSEL_INDEX 3 168#define SIR1_SPEED_PLLSEL_INDEX 3
148#define SIR1_SPEED_PLLSEL_WIDTH 1 169#define SIR1_SPEED_PLLSEL_WIDTH 1
149#define SIR1_SPEED_RATECHANGE_INDEX 6 170#define SIR1_SPEED_RATECHANGE_INDEX 6
@@ -153,20 +174,26 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
153#define SIR1_SPEED_WORDMODE_INDEX 0 174#define SIR1_SPEED_WORDMODE_INDEX 0
154#define SIR1_SPEED_WORDMODE_WIDTH 3 175#define SIR1_SPEED_WORDMODE_WIDTH 3
155 176
177#define SPEED_10000_BLWC 0
156#define SPEED_10000_CDR 0x7 178#define SPEED_10000_CDR 0x7
157#define SPEED_10000_PLL 0x1 179#define SPEED_10000_PLL 0x1
180#define SPEED_10000_PQ 0x1e
158#define SPEED_10000_RATE 0x0 181#define SPEED_10000_RATE 0x0
159#define SPEED_10000_TXAMP 0xa 182#define SPEED_10000_TXAMP 0xa
160#define SPEED_10000_WORD 0x7 183#define SPEED_10000_WORD 0x7
161 184
185#define SPEED_2500_BLWC 1
162#define SPEED_2500_CDR 0x2 186#define SPEED_2500_CDR 0x2
163#define SPEED_2500_PLL 0x0 187#define SPEED_2500_PLL 0x0
188#define SPEED_2500_PQ 0xa
164#define SPEED_2500_RATE 0x1 189#define SPEED_2500_RATE 0x1
165#define SPEED_2500_TXAMP 0xf 190#define SPEED_2500_TXAMP 0xf
166#define SPEED_2500_WORD 0x1 191#define SPEED_2500_WORD 0x1
167 192
193#define SPEED_1000_BLWC 1
168#define SPEED_1000_CDR 0x2 194#define SPEED_1000_CDR 0x2
169#define SPEED_1000_PLL 0x0 195#define SPEED_1000_PLL 0x0
196#define SPEED_1000_PQ 0xa
170#define SPEED_1000_RATE 0x3 197#define SPEED_1000_RATE 0x3
171#define SPEED_1000_TXAMP 0xf 198#define SPEED_1000_TXAMP 0xf
172#define SPEED_1000_WORD 0x1 199#define SPEED_1000_WORD 0x1
@@ -181,15 +208,6 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
181#define RXTX_REG114_PQ_REG_INDEX 9 208#define RXTX_REG114_PQ_REG_INDEX 9
182#define RXTX_REG114_PQ_REG_WIDTH 7 209#define RXTX_REG114_PQ_REG_WIDTH 7
183 210
184#define RXTX_10000_BLWC 0
185#define RXTX_10000_PQ 0x1e
186
187#define RXTX_2500_BLWC 1
188#define RXTX_2500_PQ 0xa
189
190#define RXTX_1000_BLWC 1
191#define RXTX_1000_PQ 0xa
192
193/* Bit setting and getting macros 211/* Bit setting and getting macros
194 * The get macro will extract the current bit field value from within 212 * The get macro will extract the current bit field value from within
195 * the variable 213 * the variable
@@ -291,23 +309,44 @@ do { \
291 XRXTX_IOWRITE((_priv), _reg, reg_val); \ 309 XRXTX_IOWRITE((_priv), _reg, reg_val); \
292} while (0) 310} while (0)
293 311
312static const u32 amd_xgbe_phy_serdes_blwc[] = {
313 SPEED_1000_BLWC,
314 SPEED_2500_BLWC,
315 SPEED_10000_BLWC,
316};
317
318static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
319 SPEED_1000_CDR,
320 SPEED_2500_CDR,
321 SPEED_10000_CDR,
322};
323
324static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
325 SPEED_1000_PQ,
326 SPEED_2500_PQ,
327 SPEED_10000_PQ,
328};
329
330static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
331 SPEED_1000_TXAMP,
332 SPEED_2500_TXAMP,
333 SPEED_10000_TXAMP,
334};
335
294enum amd_xgbe_phy_an { 336enum amd_xgbe_phy_an {
295 AMD_XGBE_AN_READY = 0, 337 AMD_XGBE_AN_READY = 0,
296 AMD_XGBE_AN_START,
297 AMD_XGBE_AN_EVENT,
298 AMD_XGBE_AN_PAGE_RECEIVED, 338 AMD_XGBE_AN_PAGE_RECEIVED,
299 AMD_XGBE_AN_INCOMPAT_LINK, 339 AMD_XGBE_AN_INCOMPAT_LINK,
300 AMD_XGBE_AN_COMPLETE, 340 AMD_XGBE_AN_COMPLETE,
301 AMD_XGBE_AN_NO_LINK, 341 AMD_XGBE_AN_NO_LINK,
302 AMD_XGBE_AN_EXIT,
303 AMD_XGBE_AN_ERROR, 342 AMD_XGBE_AN_ERROR,
304}; 343};
305 344
306enum amd_xgbe_phy_rx { 345enum amd_xgbe_phy_rx {
307 AMD_XGBE_RX_READY = 0, 346 AMD_XGBE_RX_BPA = 0,
308 AMD_XGBE_RX_BPA,
309 AMD_XGBE_RX_XNP, 347 AMD_XGBE_RX_XNP,
310 AMD_XGBE_RX_COMPLETE, 348 AMD_XGBE_RX_COMPLETE,
349 AMD_XGBE_RX_ERROR,
311}; 350};
312 351
313enum amd_xgbe_phy_mode { 352enum amd_xgbe_phy_mode {
@@ -316,12 +355,13 @@ enum amd_xgbe_phy_mode {
316}; 355};
317 356
318enum amd_xgbe_phy_speedset { 357enum amd_xgbe_phy_speedset {
319 AMD_XGBE_PHY_SPEEDSET_1000_10000, 358 AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
320 AMD_XGBE_PHY_SPEEDSET_2500_10000, 359 AMD_XGBE_PHY_SPEEDSET_2500_10000,
321}; 360};
322 361
323struct amd_xgbe_phy_priv { 362struct amd_xgbe_phy_priv {
324 struct platform_device *pdev; 363 struct platform_device *pdev;
364 struct acpi_device *adev;
325 struct device *dev; 365 struct device *dev;
326 366
327 struct phy_device *phydev; 367 struct phy_device *phydev;
@@ -336,10 +376,24 @@ struct amd_xgbe_phy_priv {
336 void __iomem *sir0_regs; /* SerDes integration registers (1/2) */ 376 void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
337 void __iomem *sir1_regs; /* SerDes integration registers (2/2) */ 377 void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
338 378
339 /* Maintain link status for re-starting auto-negotiation */ 379 int an_irq;
340 unsigned int link; 380 char an_irq_name[IFNAMSIZ + 32];
381 struct work_struct an_irq_work;
382 unsigned int an_irq_allocated;
383
341 unsigned int speed_set; 384 unsigned int speed_set;
342 385
386 /* SerDes UEFI configurable settings.
387 * Switching between modes/speeds requires new values for some
388 * SerDes settings. The values can be supplied as device
389 * properties in array format. The first array entry is for
390 * 1GbE, second for 2.5GbE and third for 10GbE
391 */
392 u32 serdes_blwc[XGBE_PHY_SPEEDS];
393 u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
394 u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
395 u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
396
343 /* Auto-negotiation state machine support */ 397 /* Auto-negotiation state machine support */
344 struct mutex an_mutex; 398 struct mutex an_mutex;
345 enum amd_xgbe_phy_an an_result; 399 enum amd_xgbe_phy_an an_result;
@@ -348,7 +402,11 @@ struct amd_xgbe_phy_priv {
348 enum amd_xgbe_phy_rx kx_state; 402 enum amd_xgbe_phy_rx kx_state;
349 struct work_struct an_work; 403 struct work_struct an_work;
350 struct workqueue_struct *an_workqueue; 404 struct workqueue_struct *an_workqueue;
405 unsigned int an_supported;
351 unsigned int parallel_detect; 406 unsigned int parallel_detect;
407 unsigned int fec_ability;
408
409 unsigned int lpm_ctrl; /* CTRL1 for resume */
352}; 410};
353 411
354static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev) 412static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
@@ -359,7 +417,7 @@ static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
359 if (ret < 0) 417 if (ret < 0)
360 return ret; 418 return ret;
361 419
362 ret |= 0x02; 420 ret |= XGBE_PHY_KR_TRAINING_ENABLE;
363 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret); 421 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
364 422
365 return 0; 423 return 0;
@@ -373,7 +431,7 @@ static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
373 if (ret < 0) 431 if (ret < 0)
374 return ret; 432 return ret;
375 433
376 ret &= ~0x02; 434 ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
377 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret); 435 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
378 436
379 return 0; 437 return 0;
@@ -466,12 +524,16 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
466 524
467 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE); 525 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
468 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD); 526 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
469 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_10000_TXAMP);
470 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL); 527 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
471 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_10000_CDR);
472 528
473 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_10000_BLWC); 529 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
474 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10000_PQ); 530 priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
531 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
532 priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
533 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
534 priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
535 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
536 priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
475 537
476 amd_xgbe_phy_serdes_complete_ratechange(phydev); 538 amd_xgbe_phy_serdes_complete_ratechange(phydev);
477 539
@@ -514,12 +576,16 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
514 576
515 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE); 577 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
516 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD); 578 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
517 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_2500_TXAMP);
518 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL); 579 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
519 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_2500_CDR);
520 580
521 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_2500_BLWC); 581 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
522 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_2500_PQ); 582 priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
583 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
584 priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
585 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
586 priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
587 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
588 priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
523 589
524 amd_xgbe_phy_serdes_complete_ratechange(phydev); 590 amd_xgbe_phy_serdes_complete_ratechange(phydev);
525 591
@@ -562,12 +628,16 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
562 628
563 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE); 629 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
564 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD); 630 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
565 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_1000_TXAMP);
566 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL); 631 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
567 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_1000_CDR);
568 632
569 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_1000_BLWC); 633 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
570 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1000_PQ); 634 priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
635 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
636 priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
637 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
638 priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
639 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
640 priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
571 641
572 amd_xgbe_phy_serdes_complete_ratechange(phydev); 642 amd_xgbe_phy_serdes_complete_ratechange(phydev);
573 643
@@ -635,6 +705,38 @@ static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
635 return ret; 705 return ret;
636} 706}
637 707
708static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
709 bool restart)
710{
711 int ret;
712
713 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
714 if (ret < 0)
715 return ret;
716
717 ret &= ~MDIO_AN_CTRL1_ENABLE;
718
719 if (enable)
720 ret |= MDIO_AN_CTRL1_ENABLE;
721
722 if (restart)
723 ret |= MDIO_AN_CTRL1_RESTART;
724
725 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
726
727 return 0;
728}
729
730static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
731{
732 return amd_xgbe_phy_set_an(phydev, true, true);
733}
734
735static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
736{
737 return amd_xgbe_phy_set_an(phydev, false, false);
738}
739
638static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev, 740static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
639 enum amd_xgbe_phy_rx *state) 741 enum amd_xgbe_phy_rx *state)
640{ 742{
@@ -645,7 +747,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
645 747
646 /* If we're not in KR mode then we're done */ 748 /* If we're not in KR mode then we're done */
647 if (!amd_xgbe_phy_in_kr_mode(phydev)) 749 if (!amd_xgbe_phy_in_kr_mode(phydev))
648 return AMD_XGBE_AN_EVENT; 750 return AMD_XGBE_AN_PAGE_RECEIVED;
649 751
650 /* Enable/Disable FEC */ 752 /* Enable/Disable FEC */
651 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); 753 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
@@ -660,10 +762,9 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
660 if (ret < 0) 762 if (ret < 0)
661 return AMD_XGBE_AN_ERROR; 763 return AMD_XGBE_AN_ERROR;
662 764
765 ret &= ~XGBE_PHY_FEC_MASK;
663 if ((ad_reg & 0xc000) && (lp_reg & 0xc000)) 766 if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
664 ret |= 0x01; 767 ret |= priv->fec_ability;
665 else
666 ret &= ~0x01;
667 768
668 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret); 769 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
669 770
@@ -672,14 +773,17 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
672 if (ret < 0) 773 if (ret < 0)
673 return AMD_XGBE_AN_ERROR; 774 return AMD_XGBE_AN_ERROR;
674 775
675 XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1); 776 if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
777 XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
676 778
677 ret |= 0x01; 779 ret |= XGBE_PHY_KR_TRAINING_START;
678 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret); 780 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
781 ret);
679 782
680 XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0); 783 XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
784 }
681 785
682 return AMD_XGBE_AN_EVENT; 786 return AMD_XGBE_AN_PAGE_RECEIVED;
683} 787}
684 788
685static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev, 789static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
@@ -696,7 +800,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
696 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0); 800 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
697 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg); 801 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
698 802
699 return AMD_XGBE_AN_EVENT; 803 return AMD_XGBE_AN_PAGE_RECEIVED;
700} 804}
701 805
702static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev, 806static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
@@ -735,11 +839,11 @@ static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
735 int ad_reg, lp_reg; 839 int ad_reg, lp_reg;
736 840
737 /* Check Extended Next Page support */ 841 /* Check Extended Next Page support */
738 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE); 842 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
739 if (ad_reg < 0) 843 if (ad_reg < 0)
740 return AMD_XGBE_AN_ERROR; 844 return AMD_XGBE_AN_ERROR;
741 845
742 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA); 846 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
743 if (lp_reg < 0) 847 if (lp_reg < 0)
744 return AMD_XGBE_AN_ERROR; 848 return AMD_XGBE_AN_ERROR;
745 849
@@ -748,226 +852,255 @@ static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
748 amd_xgbe_an_tx_training(phydev, state); 852 amd_xgbe_an_tx_training(phydev, state);
749} 853}
750 854
751static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev) 855static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
856{
857 struct amd_xgbe_phy_priv *priv = phydev->priv;
858 enum amd_xgbe_phy_rx *state;
859 int ret;
860
861 state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
862 : &priv->kx_state;
863
864 switch (*state) {
865 case AMD_XGBE_RX_BPA:
866 ret = amd_xgbe_an_rx_bpa(phydev, state);
867 break;
868
869 case AMD_XGBE_RX_XNP:
870 ret = amd_xgbe_an_rx_xnp(phydev, state);
871 break;
872
873 default:
874 ret = AMD_XGBE_AN_ERROR;
875 }
876
877 return ret;
878}
879
880static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
752{ 881{
753 struct amd_xgbe_phy_priv *priv = phydev->priv; 882 struct amd_xgbe_phy_priv *priv = phydev->priv;
754 int ret; 883 int ret;
755 884
756 /* Be sure we aren't looping trying to negotiate */ 885 /* Be sure we aren't looping trying to negotiate */
757 if (amd_xgbe_phy_in_kr_mode(phydev)) { 886 if (amd_xgbe_phy_in_kr_mode(phydev)) {
758 if (priv->kr_state != AMD_XGBE_RX_READY) 887 priv->kr_state = AMD_XGBE_RX_ERROR;
888
889 if (!(phydev->supported & SUPPORTED_1000baseKX_Full) &&
890 !(phydev->supported & SUPPORTED_2500baseX_Full))
891 return AMD_XGBE_AN_NO_LINK;
892
893 if (priv->kx_state != AMD_XGBE_RX_BPA)
759 return AMD_XGBE_AN_NO_LINK; 894 return AMD_XGBE_AN_NO_LINK;
760 priv->kr_state = AMD_XGBE_RX_BPA;
761 } else { 895 } else {
762 if (priv->kx_state != AMD_XGBE_RX_READY) 896 priv->kx_state = AMD_XGBE_RX_ERROR;
897
898 if (!(phydev->supported & SUPPORTED_10000baseKR_Full))
899 return AMD_XGBE_AN_NO_LINK;
900
901 if (priv->kr_state != AMD_XGBE_RX_BPA)
763 return AMD_XGBE_AN_NO_LINK; 902 return AMD_XGBE_AN_NO_LINK;
764 priv->kx_state = AMD_XGBE_RX_BPA;
765 } 903 }
766 904
767 /* Set up Advertisement register 3 first */ 905 ret = amd_xgbe_phy_disable_an(phydev);
768 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); 906 if (ret)
769 if (ret < 0)
770 return AMD_XGBE_AN_ERROR; 907 return AMD_XGBE_AN_ERROR;
771 908
772 if (phydev->supported & SUPPORTED_10000baseR_FEC) 909 ret = amd_xgbe_phy_switch_mode(phydev);
773 ret |= 0xc000; 910 if (ret)
774 else
775 ret &= ~0xc000;
776
777 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
778
779 /* Set up Advertisement register 2 next */
780 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
781 if (ret < 0)
782 return AMD_XGBE_AN_ERROR; 911 return AMD_XGBE_AN_ERROR;
783 912
784 if (phydev->supported & SUPPORTED_10000baseKR_Full) 913 ret = amd_xgbe_phy_restart_an(phydev);
785 ret |= 0x80; 914 if (ret)
786 else
787 ret &= ~0x80;
788
789 if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
790 (phydev->supported & SUPPORTED_2500baseX_Full))
791 ret |= 0x20;
792 else
793 ret &= ~0x20;
794
795 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
796
797 /* Set up Advertisement register 1 last */
798 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
799 if (ret < 0)
800 return AMD_XGBE_AN_ERROR; 915 return AMD_XGBE_AN_ERROR;
801 916
802 if (phydev->supported & SUPPORTED_Pause) 917 return AMD_XGBE_AN_INCOMPAT_LINK;
803 ret |= 0x400; 918}
804 else
805 ret &= ~0x400;
806 919
807 if (phydev->supported & SUPPORTED_Asym_Pause) 920static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
808 ret |= 0x800; 921{
809 else 922 struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;
810 ret &= ~0x800;
811 923
812 /* We don't intend to perform XNP */ 924 /* Interrupt reason must be read and cleared outside of IRQ context */
813 ret &= ~XNP_NP_EXCHANGE; 925 disable_irq_nosync(priv->an_irq);
814 926
815 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret); 927 queue_work(priv->an_workqueue, &priv->an_irq_work);
816 928
817 /* Enable and start auto-negotiation */ 929 return IRQ_HANDLED;
818 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0); 930}
819 931
820 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL); 932static void amd_xgbe_an_irq_work(struct work_struct *work)
821 if (ret < 0) 933{
822 return AMD_XGBE_AN_ERROR; 934 struct amd_xgbe_phy_priv *priv = container_of(work,
935 struct amd_xgbe_phy_priv,
936 an_irq_work);
823 937
824 ret |= MDIO_KR_CTRL_PDETECT; 938 /* Avoid a race between enabling the IRQ and exiting the work by
825 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL, ret); 939 * waiting for the work to finish and then queueing it
940 */
941 flush_work(&priv->an_work);
942 queue_work(priv->an_workqueue, &priv->an_work);
943}
826 944
827 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); 945static void amd_xgbe_an_state_machine(struct work_struct *work)
828 if (ret < 0) 946{
829 return AMD_XGBE_AN_ERROR; 947 struct amd_xgbe_phy_priv *priv = container_of(work,
948 struct amd_xgbe_phy_priv,
949 an_work);
950 struct phy_device *phydev = priv->phydev;
951 enum amd_xgbe_phy_an cur_state = priv->an_state;
952 int int_reg, int_mask;
830 953
831 ret |= MDIO_AN_CTRL1_ENABLE; 954 mutex_lock(&priv->an_mutex);
832 ret |= MDIO_AN_CTRL1_RESTART;
833 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
834 955
835 return AMD_XGBE_AN_EVENT; 956 /* Read the interrupt */
836} 957 int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
958 if (!int_reg)
959 goto out;
837 960
838static enum amd_xgbe_phy_an amd_xgbe_an_event(struct phy_device *phydev) 961next_int:
839{ 962 if (int_reg < 0) {
840 enum amd_xgbe_phy_an new_state; 963 priv->an_state = AMD_XGBE_AN_ERROR;
841 int ret; 964 int_mask = XGBE_AN_INT_MASK;
965 } else if (int_reg & XGBE_AN_PG_RCV) {
966 priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
967 int_mask = XGBE_AN_PG_RCV;
968 } else if (int_reg & XGBE_AN_INC_LINK) {
969 priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
970 int_mask = XGBE_AN_INC_LINK;
971 } else if (int_reg & XGBE_AN_INT_CMPLT) {
972 priv->an_state = AMD_XGBE_AN_COMPLETE;
973 int_mask = XGBE_AN_INT_CMPLT;
974 } else {
975 priv->an_state = AMD_XGBE_AN_ERROR;
976 int_mask = 0;
977 }
842 978
843 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT); 979 /* Clear the interrupt to be processed */
844 if (ret < 0) 980 int_reg &= ~int_mask;
845 return AMD_XGBE_AN_ERROR; 981 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
846 982
847 new_state = AMD_XGBE_AN_EVENT; 983 priv->an_result = priv->an_state;
848 if (ret & XGBE_AN_PG_RCV)
849 new_state = AMD_XGBE_AN_PAGE_RECEIVED;
850 else if (ret & XGBE_AN_INC_LINK)
851 new_state = AMD_XGBE_AN_INCOMPAT_LINK;
852 else if (ret & XGBE_AN_INT_CMPLT)
853 new_state = AMD_XGBE_AN_COMPLETE;
854 984
855 if (new_state != AMD_XGBE_AN_EVENT) 985again:
856 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0); 986 cur_state = priv->an_state;
857 987
858 return new_state; 988 switch (priv->an_state) {
859} 989 case AMD_XGBE_AN_READY:
990 priv->an_supported = 0;
991 break;
860 992
861static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev) 993 case AMD_XGBE_AN_PAGE_RECEIVED:
862{ 994 priv->an_state = amd_xgbe_an_page_received(phydev);
863 struct amd_xgbe_phy_priv *priv = phydev->priv; 995 priv->an_supported++;
864 enum amd_xgbe_phy_rx *state; 996 break;
865 int ret;
866 997
867 state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state 998 case AMD_XGBE_AN_INCOMPAT_LINK:
868 : &priv->kx_state; 999 priv->an_supported = 0;
1000 priv->parallel_detect = 0;
1001 priv->an_state = amd_xgbe_an_incompat_link(phydev);
1002 break;
869 1003
870 switch (*state) { 1004 case AMD_XGBE_AN_COMPLETE:
871 case AMD_XGBE_RX_BPA: 1005 priv->parallel_detect = priv->an_supported ? 0 : 1;
872 ret = amd_xgbe_an_rx_bpa(phydev, state); 1006 netdev_dbg(phydev->attached_dev, "%s successful\n",
1007 priv->an_supported ? "Auto negotiation"
1008 : "Parallel detection");
873 break; 1009 break;
874 1010
875 case AMD_XGBE_RX_XNP: 1011 case AMD_XGBE_AN_NO_LINK:
876 ret = amd_xgbe_an_rx_xnp(phydev, state);
877 break; 1012 break;
878 1013
879 default: 1014 default:
880 ret = AMD_XGBE_AN_ERROR; 1015 priv->an_state = AMD_XGBE_AN_ERROR;
881 } 1016 }
882 1017
883 return ret; 1018 if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
884} 1019 int_reg = 0;
1020 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
1021 } else if (priv->an_state == AMD_XGBE_AN_ERROR) {
1022 netdev_err(phydev->attached_dev,
1023 "error during auto-negotiation, state=%u\n",
1024 cur_state);
885 1025
886static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev) 1026 int_reg = 0;
887{ 1027 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
888 int ret; 1028 }
889 1029
890 ret = amd_xgbe_phy_switch_mode(phydev); 1030 if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
891 if (ret) 1031 priv->an_result = priv->an_state;
892 return AMD_XGBE_AN_ERROR; 1032 priv->an_state = AMD_XGBE_AN_READY;
1033 priv->kr_state = AMD_XGBE_RX_BPA;
1034 priv->kx_state = AMD_XGBE_RX_BPA;
1035 }
893 1036
894 return AMD_XGBE_AN_START; 1037 if (cur_state != priv->an_state)
895} 1038 goto again;
896 1039
897static void amd_xgbe_an_state_machine(struct work_struct *work) 1040 if (int_reg)
898{ 1041 goto next_int;
899 struct amd_xgbe_phy_priv *priv = container_of(work,
900 struct amd_xgbe_phy_priv,
901 an_work);
902 struct phy_device *phydev = priv->phydev;
903 enum amd_xgbe_phy_an cur_state;
904 int sleep;
905 unsigned int an_supported = 0;
906 1042
907 /* Start in KX mode */ 1043out:
908 if (amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX)) 1044 enable_irq(priv->an_irq);
909 priv->an_state = AMD_XGBE_AN_ERROR;
910 1045
911 while (1) { 1046 mutex_unlock(&priv->an_mutex);
912 mutex_lock(&priv->an_mutex); 1047}
913 1048
914 cur_state = priv->an_state; 1049static int amd_xgbe_an_init(struct phy_device *phydev)
1050{
1051 int ret;
915 1052
916 switch (priv->an_state) { 1053 /* Set up Advertisement register 3 first */
917 case AMD_XGBE_AN_START: 1054 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
918 an_supported = 0; 1055 if (ret < 0)
919 priv->parallel_detect = 0; 1056 return ret;
920 priv->an_state = amd_xgbe_an_start(phydev);
921 break;
922 1057
923 case AMD_XGBE_AN_EVENT: 1058 if (phydev->supported & SUPPORTED_10000baseR_FEC)
924 priv->an_state = amd_xgbe_an_event(phydev); 1059 ret |= 0xc000;
925 break; 1060 else
1061 ret &= ~0xc000;
926 1062
927 case AMD_XGBE_AN_PAGE_RECEIVED: 1063 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
928 priv->an_state = amd_xgbe_an_page_received(phydev);
929 an_supported++;
930 break;
931 1064
932 case AMD_XGBE_AN_INCOMPAT_LINK: 1065 /* Set up Advertisement register 2 next */
933 priv->an_state = amd_xgbe_an_incompat_link(phydev); 1066 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
934 break; 1067 if (ret < 0)
1068 return ret;
935 1069
936 case AMD_XGBE_AN_COMPLETE: 1070 if (phydev->supported & SUPPORTED_10000baseKR_Full)
937 priv->parallel_detect = an_supported ? 0 : 1; 1071 ret |= 0x80;
938 netdev_info(phydev->attached_dev, "%s successful\n", 1072 else
939 an_supported ? "Auto negotiation" 1073 ret &= ~0x80;
940 : "Parallel detection");
941 /* fall through */
942 1074
943 case AMD_XGBE_AN_NO_LINK: 1075 if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
944 case AMD_XGBE_AN_EXIT: 1076 (phydev->supported & SUPPORTED_2500baseX_Full))
945 goto exit_unlock; 1077 ret |= 0x20;
1078 else
1079 ret &= ~0x20;
946 1080
947 default: 1081 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
948 priv->an_state = AMD_XGBE_AN_ERROR;
949 }
950 1082
951 if (priv->an_state == AMD_XGBE_AN_ERROR) { 1083 /* Set up Advertisement register 1 last */
952 netdev_err(phydev->attached_dev, 1084 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
953 "error during auto-negotiation, state=%u\n", 1085 if (ret < 0)
954 cur_state); 1086 return ret;
955 goto exit_unlock;
956 }
957 1087
958 sleep = (priv->an_state == AMD_XGBE_AN_EVENT) ? 1 : 0; 1088 if (phydev->supported & SUPPORTED_Pause)
1089 ret |= 0x400;
1090 else
1091 ret &= ~0x400;
959 1092
960 mutex_unlock(&priv->an_mutex); 1093 if (phydev->supported & SUPPORTED_Asym_Pause)
1094 ret |= 0x800;
1095 else
1096 ret &= ~0x800;
961 1097
962 if (sleep) 1098 /* We don't intend to perform XNP */
963 usleep_range(20, 50); 1099 ret &= ~XNP_NP_EXCHANGE;
964 }
965 1100
966exit_unlock: 1101 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
967 priv->an_result = priv->an_state;
968 priv->an_state = AMD_XGBE_AN_READY;
969 1102
970 mutex_unlock(&priv->an_mutex); 1103 return 0;
971} 1104}
972 1105
973static int amd_xgbe_phy_soft_reset(struct phy_device *phydev) 1106static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
@@ -992,20 +1125,57 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
992 if (ret & MDIO_CTRL1_RESET) 1125 if (ret & MDIO_CTRL1_RESET)
993 return -ETIMEDOUT; 1126 return -ETIMEDOUT;
994 1127
995 /* Make sure the XPCS and SerDes are in compatible states */ 1128 /* Disable auto-negotiation for now */
996 return amd_xgbe_phy_xgmii_mode(phydev); 1129 ret = amd_xgbe_phy_disable_an(phydev);
1130 if (ret < 0)
1131 return ret;
1132
1133 /* Clear auto-negotiation interrupts */
1134 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
1135
1136 return 0;
997} 1137}
998 1138
999static int amd_xgbe_phy_config_init(struct phy_device *phydev) 1139static int amd_xgbe_phy_config_init(struct phy_device *phydev)
1000{ 1140{
1001 struct amd_xgbe_phy_priv *priv = phydev->priv; 1141 struct amd_xgbe_phy_priv *priv = phydev->priv;
1142 struct net_device *netdev = phydev->attached_dev;
1143 int ret;
1144
1145 if (!priv->an_irq_allocated) {
1146 /* Allocate the auto-negotiation workqueue and interrupt */
1147 snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
1148 "%s-pcs", netdev_name(netdev));
1149
1150 priv->an_workqueue =
1151 create_singlethread_workqueue(priv->an_irq_name);
1152 if (!priv->an_workqueue) {
1153 netdev_err(netdev, "phy workqueue creation failed\n");
1154 return -ENOMEM;
1155 }
1156
1157 ret = devm_request_irq(priv->dev, priv->an_irq,
1158 amd_xgbe_an_isr, 0, priv->an_irq_name,
1159 priv);
1160 if (ret) {
1161 netdev_err(netdev, "phy irq request failed\n");
1162 destroy_workqueue(priv->an_workqueue);
1163 return ret;
1164 }
1165
1166 priv->an_irq_allocated = 1;
1167 }
1168
1169 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
1170 if (ret < 0)
1171 return ret;
1172 priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
1002 1173
1003 /* Initialize supported features */ 1174 /* Initialize supported features */
1004 phydev->supported = SUPPORTED_Autoneg; 1175 phydev->supported = SUPPORTED_Autoneg;
1005 phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 1176 phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1006 phydev->supported |= SUPPORTED_Backplane; 1177 phydev->supported |= SUPPORTED_Backplane;
1007 phydev->supported |= SUPPORTED_10000baseKR_Full | 1178 phydev->supported |= SUPPORTED_10000baseKR_Full;
1008 SUPPORTED_10000baseR_FEC;
1009 switch (priv->speed_set) { 1179 switch (priv->speed_set) {
1010 case AMD_XGBE_PHY_SPEEDSET_1000_10000: 1180 case AMD_XGBE_PHY_SPEEDSET_1000_10000:
1011 phydev->supported |= SUPPORTED_1000baseKX_Full; 1181 phydev->supported |= SUPPORTED_1000baseKX_Full;
@@ -1014,11 +1184,33 @@ static int amd_xgbe_phy_config_init(struct phy_device *phydev)
1014 phydev->supported |= SUPPORTED_2500baseX_Full; 1184 phydev->supported |= SUPPORTED_2500baseX_Full;
1015 break; 1185 break;
1016 } 1186 }
1187
1188 if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
1189 phydev->supported |= SUPPORTED_10000baseR_FEC;
1190
1017 phydev->advertising = phydev->supported; 1191 phydev->advertising = phydev->supported;
1018 1192
1019 /* Turn off and clear interrupts */ 1193 /* Set initial mode - call the mode setting routines
1020 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); 1194 * directly to insure we are properly configured
1021 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0); 1195 */
1196 if (phydev->supported & SUPPORTED_10000baseKR_Full)
1197 ret = amd_xgbe_phy_xgmii_mode(phydev);
1198 else if (phydev->supported & SUPPORTED_1000baseKX_Full)
1199 ret = amd_xgbe_phy_gmii_mode(phydev);
1200 else if (phydev->supported & SUPPORTED_2500baseX_Full)
1201 ret = amd_xgbe_phy_gmii_2500_mode(phydev);
1202 else
1203 ret = -EINVAL;
1204 if (ret < 0)
1205 return ret;
1206
1207 /* Set up advertisement registers based on current settings */
1208 ret = amd_xgbe_an_init(phydev);
1209 if (ret)
1210 return ret;
1211
1212 /* Enable auto-negotiation interrupts */
1213 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
1022 1214
1023 return 0; 1215 return 0;
1024} 1216}
@@ -1028,25 +1220,19 @@ static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
1028 int ret; 1220 int ret;
1029 1221
1030 /* Disable auto-negotiation */ 1222 /* Disable auto-negotiation */
1031 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); 1223 ret = amd_xgbe_phy_disable_an(phydev);
1032 if (ret < 0) 1224 if (ret < 0)
1033 return ret; 1225 return ret;
1034 1226
1035 ret &= ~MDIO_AN_CTRL1_ENABLE;
1036 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
1037
1038 /* Validate/Set specified speed */ 1227 /* Validate/Set specified speed */
1039 switch (phydev->speed) { 1228 switch (phydev->speed) {
1040 case SPEED_10000: 1229 case SPEED_10000:
1041 ret = amd_xgbe_phy_xgmii_mode(phydev); 1230 ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
1042 break; 1231 break;
1043 1232
1044 case SPEED_2500: 1233 case SPEED_2500:
1045 ret = amd_xgbe_phy_gmii_2500_mode(phydev);
1046 break;
1047
1048 case SPEED_1000: 1234 case SPEED_1000:
1049 ret = amd_xgbe_phy_gmii_mode(phydev); 1235 ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
1050 break; 1236 break;
1051 1237
1052 default: 1238 default:
@@ -1066,10 +1252,11 @@ static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
1066 return 0; 1252 return 0;
1067} 1253}
1068 1254
1069static int amd_xgbe_phy_config_aneg(struct phy_device *phydev) 1255static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
1070{ 1256{
1071 struct amd_xgbe_phy_priv *priv = phydev->priv; 1257 struct amd_xgbe_phy_priv *priv = phydev->priv;
1072 u32 mmd_mask = phydev->c45_ids.devices_in_package; 1258 u32 mmd_mask = phydev->c45_ids.devices_in_package;
1259 int ret;
1073 1260
1074 if (phydev->autoneg != AUTONEG_ENABLE) 1261 if (phydev->autoneg != AUTONEG_ENABLE)
1075 return amd_xgbe_phy_setup_forced(phydev); 1262 return amd_xgbe_phy_setup_forced(phydev);
@@ -1078,56 +1265,79 @@ static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
1078 if (!(mmd_mask & MDIO_DEVS_AN)) 1265 if (!(mmd_mask & MDIO_DEVS_AN))
1079 return -EINVAL; 1266 return -EINVAL;
1080 1267
1081 /* Start/Restart the auto-negotiation state machine */ 1268 /* Disable auto-negotiation interrupt */
1082 mutex_lock(&priv->an_mutex); 1269 disable_irq(priv->an_irq);
1270
1271 /* Start auto-negotiation in a supported mode */
1272 if (phydev->supported & SUPPORTED_10000baseKR_Full)
1273 ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
1274 else if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
1275 (phydev->supported & SUPPORTED_2500baseX_Full))
1276 ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
1277 else
1278 ret = -EINVAL;
1279 if (ret < 0) {
1280 enable_irq(priv->an_irq);
1281 return ret;
1282 }
1283
1284 /* Disable and stop any in progress auto-negotiation */
1285 ret = amd_xgbe_phy_disable_an(phydev);
1286 if (ret < 0)
1287 return ret;
1288
1289 /* Clear any auto-negotitation interrupts */
1290 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
1291
1083 priv->an_result = AMD_XGBE_AN_READY; 1292 priv->an_result = AMD_XGBE_AN_READY;
1084 priv->an_state = AMD_XGBE_AN_START; 1293 priv->an_state = AMD_XGBE_AN_READY;
1085 priv->kr_state = AMD_XGBE_RX_READY; 1294 priv->kr_state = AMD_XGBE_RX_BPA;
1086 priv->kx_state = AMD_XGBE_RX_READY; 1295 priv->kx_state = AMD_XGBE_RX_BPA;
1087 mutex_unlock(&priv->an_mutex);
1088 1296
1089 queue_work(priv->an_workqueue, &priv->an_work); 1297 /* Re-enable auto-negotiation interrupt */
1298 enable_irq(priv->an_irq);
1090 1299
1091 return 0; 1300 /* Set up advertisement registers based on current settings */
1301 ret = amd_xgbe_an_init(phydev);
1302 if (ret)
1303 return ret;
1304
1305 /* Enable and start auto-negotiation */
1306 return amd_xgbe_phy_restart_an(phydev);
1092} 1307}
1093 1308
1094static int amd_xgbe_phy_aneg_done(struct phy_device *phydev) 1309static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
1095{ 1310{
1096 struct amd_xgbe_phy_priv *priv = phydev->priv; 1311 struct amd_xgbe_phy_priv *priv = phydev->priv;
1097 enum amd_xgbe_phy_an state; 1312 int ret;
1098 1313
1099 mutex_lock(&priv->an_mutex); 1314 mutex_lock(&priv->an_mutex);
1100 state = priv->an_result; 1315
1316 ret = __amd_xgbe_phy_config_aneg(phydev);
1317
1101 mutex_unlock(&priv->an_mutex); 1318 mutex_unlock(&priv->an_mutex);
1102 1319
1103 return (state == AMD_XGBE_AN_COMPLETE); 1320 return ret;
1321}
1322
1323static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
1324{
1325 struct amd_xgbe_phy_priv *priv = phydev->priv;
1326
1327 return (priv->an_result == AMD_XGBE_AN_COMPLETE);
1104} 1328}
1105 1329
1106static int amd_xgbe_phy_update_link(struct phy_device *phydev) 1330static int amd_xgbe_phy_update_link(struct phy_device *phydev)
1107{ 1331{
1108 struct amd_xgbe_phy_priv *priv = phydev->priv; 1332 struct amd_xgbe_phy_priv *priv = phydev->priv;
1109 enum amd_xgbe_phy_an state;
1110 unsigned int check_again, autoneg;
1111 int ret; 1333 int ret;
1112 1334
1113 /* If we're doing auto-negotiation don't report link down */ 1335 /* If we're doing auto-negotiation don't report link down */
1114 mutex_lock(&priv->an_mutex); 1336 if (priv->an_state != AMD_XGBE_AN_READY) {
1115 state = priv->an_state;
1116 mutex_unlock(&priv->an_mutex);
1117
1118 if (state != AMD_XGBE_AN_READY) {
1119 phydev->link = 1; 1337 phydev->link = 1;
1120 return 0; 1338 return 0;
1121 } 1339 }
1122 1340
1123 /* Since the device can be in the wrong mode when a link is
1124 * (re-)established (cable connected after the interface is
1125 * up, etc.), the link status may report no link. If there
1126 * is no link, try switching modes and checking the status
1127 * again if auto negotiation is enabled.
1128 */
1129 check_again = (phydev->autoneg == AUTONEG_ENABLE) ? 1 : 0;
1130again:
1131 /* Link status is latched low, so read once to clear 1341 /* Link status is latched low, so read once to clear
1132 * and then read again to get current state 1342 * and then read again to get current state
1133 */ 1343 */
@@ -1141,25 +1351,6 @@ again:
1141 1351
1142 phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0; 1352 phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
1143 1353
1144 if (!phydev->link) {
1145 if (check_again) {
1146 ret = amd_xgbe_phy_switch_mode(phydev);
1147 if (ret < 0)
1148 return ret;
1149 check_again = 0;
1150 goto again;
1151 }
1152 }
1153
1154 autoneg = (phydev->link && !priv->link) ? 1 : 0;
1155 priv->link = phydev->link;
1156 if (autoneg) {
1157 /* Link is (back) up, re-start auto-negotiation */
1158 ret = amd_xgbe_phy_config_aneg(phydev);
1159 if (ret < 0)
1160 return ret;
1161 }
1162
1163 return 0; 1354 return 0;
1164} 1355}
1165 1356
@@ -1249,6 +1440,7 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev)
1249 1440
1250static int amd_xgbe_phy_suspend(struct phy_device *phydev) 1441static int amd_xgbe_phy_suspend(struct phy_device *phydev)
1251{ 1442{
1443 struct amd_xgbe_phy_priv *priv = phydev->priv;
1252 int ret; 1444 int ret;
1253 1445
1254 mutex_lock(&phydev->lock); 1446 mutex_lock(&phydev->lock);
@@ -1257,6 +1449,8 @@ static int amd_xgbe_phy_suspend(struct phy_device *phydev)
1257 if (ret < 0) 1449 if (ret < 0)
1258 goto unlock; 1450 goto unlock;
1259 1451
1452 priv->lpm_ctrl = ret;
1453
1260 ret |= MDIO_CTRL1_LPOWER; 1454 ret |= MDIO_CTRL1_LPOWER;
1261 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret); 1455 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
1262 1456
@@ -1270,69 +1464,106 @@ unlock:
1270 1464
1271static int amd_xgbe_phy_resume(struct phy_device *phydev) 1465static int amd_xgbe_phy_resume(struct phy_device *phydev)
1272{ 1466{
1273 int ret; 1467 struct amd_xgbe_phy_priv *priv = phydev->priv;
1274 1468
1275 mutex_lock(&phydev->lock); 1469 mutex_lock(&phydev->lock);
1276 1470
1277 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1); 1471 priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
1278 if (ret < 0) 1472 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);
1279 goto unlock;
1280 1473
1281 ret &= ~MDIO_CTRL1_LPOWER; 1474 mutex_unlock(&phydev->lock);
1282 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
1283 1475
1284 ret = 0; 1476 return 0;
1477}
1285 1478
1286unlock: 1479static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
1287 mutex_unlock(&phydev->lock); 1480 unsigned int type)
1481{
1482 unsigned int count;
1483 int i;
1288 1484
1289 return ret; 1485 for (i = 0, count = 0; i < pdev->num_resources; i++) {
1486 struct resource *r = &pdev->resource[i];
1487
1488 if (type == resource_type(r))
1489 count++;
1490 }
1491
1492 return count;
1290} 1493}
1291 1494
1292static int amd_xgbe_phy_probe(struct phy_device *phydev) 1495static int amd_xgbe_phy_probe(struct phy_device *phydev)
1293{ 1496{
1294 struct amd_xgbe_phy_priv *priv; 1497 struct amd_xgbe_phy_priv *priv;
1295 struct platform_device *pdev; 1498 struct platform_device *phy_pdev;
1296 struct device *dev; 1499 struct device *dev, *phy_dev;
1297 char *wq_name; 1500 unsigned int phy_resnum, phy_irqnum;
1298 const __be32 *property;
1299 unsigned int speed_set;
1300 int ret; 1501 int ret;
1301 1502
1302 if (!phydev->dev.of_node) 1503 if (!phydev->bus || !phydev->bus->parent)
1303 return -EINVAL; 1504 return -EINVAL;
1304 1505
1305 pdev = of_find_device_by_node(phydev->dev.of_node); 1506 dev = phydev->bus->parent;
1306 if (!pdev)
1307 return -EINVAL;
1308 dev = &pdev->dev;
1309
1310 wq_name = kasprintf(GFP_KERNEL, "%s-amd-xgbe-phy", phydev->bus->name);
1311 if (!wq_name) {
1312 ret = -ENOMEM;
1313 goto err_pdev;
1314 }
1315 1507
1316 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 1508 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1317 if (!priv) { 1509 if (!priv)
1318 ret = -ENOMEM; 1510 return -ENOMEM;
1319 goto err_name;
1320 }
1321 1511
1322 priv->pdev = pdev; 1512 priv->pdev = to_platform_device(dev);
1513 priv->adev = ACPI_COMPANION(dev);
1323 priv->dev = dev; 1514 priv->dev = dev;
1324 priv->phydev = phydev; 1515 priv->phydev = phydev;
1516 mutex_init(&priv->an_mutex);
1517 INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
1518 INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
1519
1520 if (!priv->adev || acpi_disabled) {
1521 struct device_node *bus_node;
1522 struct device_node *phy_node;
1523
1524 bus_node = priv->dev->of_node;
1525 phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
1526 if (!phy_node) {
1527 dev_err(dev, "unable to parse phy-handle\n");
1528 ret = -EINVAL;
1529 goto err_priv;
1530 }
1531
1532 phy_pdev = of_find_device_by_node(phy_node);
1533 of_node_put(phy_node);
1534
1535 if (!phy_pdev) {
1536 dev_err(dev, "unable to obtain phy device\n");
1537 ret = -EINVAL;
1538 goto err_priv;
1539 }
1540
1541 phy_resnum = 0;
1542 phy_irqnum = 0;
1543 } else {
1544 /* In ACPI, the XGBE and PHY resources are the grouped
1545 * together with the PHY resources at the end
1546 */
1547 phy_pdev = priv->pdev;
1548 phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
1549 IORESOURCE_MEM) - 3;
1550 phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
1551 IORESOURCE_IRQ) - 1;
1552 }
1553 phy_dev = &phy_pdev->dev;
1325 1554
1326 /* Get the device mmio areas */ 1555 /* Get the device mmio areas */
1327 priv->rxtx_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1556 priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
1557 phy_resnum++);
1328 priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res); 1558 priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
1329 if (IS_ERR(priv->rxtx_regs)) { 1559 if (IS_ERR(priv->rxtx_regs)) {
1330 dev_err(dev, "rxtx ioremap failed\n"); 1560 dev_err(dev, "rxtx ioremap failed\n");
1331 ret = PTR_ERR(priv->rxtx_regs); 1561 ret = PTR_ERR(priv->rxtx_regs);
1332 goto err_priv; 1562 goto err_put;
1333 } 1563 }
1334 1564
1335 priv->sir0_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1565 priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
1566 phy_resnum++);
1336 priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res); 1567 priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
1337 if (IS_ERR(priv->sir0_regs)) { 1568 if (IS_ERR(priv->sir0_regs)) {
1338 dev_err(dev, "sir0 ioremap failed\n"); 1569 dev_err(dev, "sir0 ioremap failed\n");
@@ -1340,7 +1571,8 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
1340 goto err_rxtx; 1571 goto err_rxtx;
1341 } 1572 }
1342 1573
1343 priv->sir1_res = platform_get_resource(pdev, IORESOURCE_MEM, 2); 1574 priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
1575 phy_resnum++);
1344 priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res); 1576 priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
1345 if (IS_ERR(priv->sir1_regs)) { 1577 if (IS_ERR(priv->sir1_regs)) {
1346 dev_err(dev, "sir1 ioremap failed\n"); 1578 dev_err(dev, "sir1 ioremap failed\n");
@@ -1348,40 +1580,98 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
1348 goto err_sir0; 1580 goto err_sir0;
1349 } 1581 }
1350 1582
1583 /* Get the auto-negotiation interrupt */
1584 ret = platform_get_irq(phy_pdev, phy_irqnum);
1585 if (ret < 0) {
1586 dev_err(dev, "platform_get_irq failed\n");
1587 goto err_sir1;
1588 }
1589 priv->an_irq = ret;
1590
1351 /* Get the device speed set property */ 1591 /* Get the device speed set property */
1352 speed_set = 0; 1592 ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
1353 property = of_get_property(dev->of_node, XGBE_PHY_SPEEDSET_PROPERTY, 1593 &priv->speed_set);
1354 NULL); 1594 if (ret) {
1355 if (property) 1595 dev_err(dev, "invalid %s property\n",
1356 speed_set = be32_to_cpu(*property); 1596 XGBE_PHY_SPEEDSET_PROPERTY);
1357 1597 goto err_sir1;
1358 switch (speed_set) { 1598 }
1359 case 0: 1599
1360 priv->speed_set = AMD_XGBE_PHY_SPEEDSET_1000_10000; 1600 switch (priv->speed_set) {
1361 break; 1601 case AMD_XGBE_PHY_SPEEDSET_1000_10000:
1362 case 1: 1602 case AMD_XGBE_PHY_SPEEDSET_2500_10000:
1363 priv->speed_set = AMD_XGBE_PHY_SPEEDSET_2500_10000;
1364 break; 1603 break;
1365 default: 1604 default:
1366 dev_err(dev, "invalid amd,speed-set property\n"); 1605 dev_err(dev, "invalid %s property\n",
1606 XGBE_PHY_SPEEDSET_PROPERTY);
1367 ret = -EINVAL; 1607 ret = -EINVAL;
1368 goto err_sir1; 1608 goto err_sir1;
1369 } 1609 }
1370 1610
1371 priv->link = 1; 1611 if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
1612 ret = device_property_read_u32_array(phy_dev,
1613 XGBE_PHY_BLWC_PROPERTY,
1614 priv->serdes_blwc,
1615 XGBE_PHY_SPEEDS);
1616 if (ret) {
1617 dev_err(dev, "invalid %s property\n",
1618 XGBE_PHY_BLWC_PROPERTY);
1619 goto err_sir1;
1620 }
1621 } else {
1622 memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
1623 sizeof(priv->serdes_blwc));
1624 }
1372 1625
1373 mutex_init(&priv->an_mutex); 1626 if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
1374 INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine); 1627 ret = device_property_read_u32_array(phy_dev,
1375 priv->an_workqueue = create_singlethread_workqueue(wq_name); 1628 XGBE_PHY_CDR_RATE_PROPERTY,
1376 if (!priv->an_workqueue) { 1629 priv->serdes_cdr_rate,
1377 ret = -ENOMEM; 1630 XGBE_PHY_SPEEDS);
1378 goto err_sir1; 1631 if (ret) {
1632 dev_err(dev, "invalid %s property\n",
1633 XGBE_PHY_CDR_RATE_PROPERTY);
1634 goto err_sir1;
1635 }
1636 } else {
1637 memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
1638 sizeof(priv->serdes_cdr_rate));
1639 }
1640
1641 if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
1642 ret = device_property_read_u32_array(phy_dev,
1643 XGBE_PHY_PQ_SKEW_PROPERTY,
1644 priv->serdes_pq_skew,
1645 XGBE_PHY_SPEEDS);
1646 if (ret) {
1647 dev_err(dev, "invalid %s property\n",
1648 XGBE_PHY_PQ_SKEW_PROPERTY);
1649 goto err_sir1;
1650 }
1651 } else {
1652 memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
1653 sizeof(priv->serdes_pq_skew));
1654 }
1655
1656 if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
1657 ret = device_property_read_u32_array(phy_dev,
1658 XGBE_PHY_TX_AMP_PROPERTY,
1659 priv->serdes_tx_amp,
1660 XGBE_PHY_SPEEDS);
1661 if (ret) {
1662 dev_err(dev, "invalid %s property\n",
1663 XGBE_PHY_TX_AMP_PROPERTY);
1664 goto err_sir1;
1665 }
1666 } else {
1667 memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
1668 sizeof(priv->serdes_tx_amp));
1379 } 1669 }
1380 1670
1381 phydev->priv = priv; 1671 phydev->priv = priv;
1382 1672
1383 kfree(wq_name); 1673 if (!priv->adev || acpi_disabled)
1384 of_dev_put(pdev); 1674 platform_device_put(phy_pdev);
1385 1675
1386 return 0; 1676 return 0;
1387 1677
@@ -1400,15 +1690,13 @@ err_rxtx:
1400 devm_release_mem_region(dev, priv->rxtx_res->start, 1690 devm_release_mem_region(dev, priv->rxtx_res->start,
1401 resource_size(priv->rxtx_res)); 1691 resource_size(priv->rxtx_res));
1402 1692
1693err_put:
1694 if (!priv->adev || acpi_disabled)
1695 platform_device_put(phy_pdev);
1696
1403err_priv: 1697err_priv:
1404 devm_kfree(dev, priv); 1698 devm_kfree(dev, priv);
1405 1699
1406err_name:
1407 kfree(wq_name);
1408
1409err_pdev:
1410 of_dev_put(pdev);
1411
1412 return ret; 1700 return ret;
1413} 1701}
1414 1702
@@ -1417,13 +1705,12 @@ static void amd_xgbe_phy_remove(struct phy_device *phydev)
1417 struct amd_xgbe_phy_priv *priv = phydev->priv; 1705 struct amd_xgbe_phy_priv *priv = phydev->priv;
1418 struct device *dev = priv->dev; 1706 struct device *dev = priv->dev;
1419 1707
1420 /* Stop any in process auto-negotiation */ 1708 if (priv->an_irq_allocated) {
1421 mutex_lock(&priv->an_mutex); 1709 devm_free_irq(dev, priv->an_irq, priv);
1422 priv->an_state = AMD_XGBE_AN_EXIT;
1423 mutex_unlock(&priv->an_mutex);
1424 1710
1425 flush_workqueue(priv->an_workqueue); 1711 flush_workqueue(priv->an_workqueue);
1426 destroy_workqueue(priv->an_workqueue); 1712 destroy_workqueue(priv->an_workqueue);
1713 }
1427 1714
1428 /* Release resources */ 1715 /* Release resources */
1429 devm_iounmap(dev, priv->sir1_regs); 1716 devm_iounmap(dev, priv->sir1_regs);
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 3ad0e6e16c39..a08a3c78ba97 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -168,7 +168,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
168 struct fixed_mdio_bus *fmb = &platform_fmb; 168 struct fixed_mdio_bus *fmb = &platform_fmb;
169 struct fixed_phy *fp; 169 struct fixed_phy *fp;
170 170
171 if (!link_update || !phydev || !phydev->bus) 171 if (!phydev || !phydev->bus)
172 return -EINVAL; 172 return -EINVAL;
173 173
174 list_for_each_entry(fp, &fmb->phys, node) { 174 list_for_each_entry(fp, &fmb->phys, node) {
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 50051f271b10..095ef3fe369a 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -443,9 +443,13 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
443 if (!drv || !phydrv->suspend) 443 if (!drv || !phydrv->suspend)
444 return false; 444 return false;
445 445
446 /* PHY not attached? May suspend. */ 446 /* PHY not attached? May suspend if the PHY has not already been
447 * suspended as part of a prior call to phy_disconnect() ->
448 * phy_detach() -> phy_suspend() because the parent netdev might be the
449 * MDIO bus driver and clock gated at this point.
450 */
447 if (!netdev) 451 if (!netdev)
448 return true; 452 return !phydev->suspended;
449 453
450 /* Don't suspend PHY if the attched netdev parent may wakeup. 454 /* Don't suspend PHY if the attched netdev parent may wakeup.
451 * The parent may point to a PCI device, as in tg3 driver. 455 * The parent may point to a PCI device, as in tg3 driver.
@@ -465,7 +469,6 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
465 469
466static int mdio_bus_suspend(struct device *dev) 470static int mdio_bus_suspend(struct device *dev)
467{ 471{
468 struct phy_driver *phydrv = to_phy_driver(dev->driver);
469 struct phy_device *phydev = to_phy_device(dev); 472 struct phy_device *phydev = to_phy_device(dev);
470 473
471 /* We must stop the state machine manually, otherwise it stops out of 474 /* We must stop the state machine manually, otherwise it stops out of
@@ -479,19 +482,18 @@ static int mdio_bus_suspend(struct device *dev)
479 if (!mdio_bus_phy_may_suspend(phydev)) 482 if (!mdio_bus_phy_may_suspend(phydev))
480 return 0; 483 return 0;
481 484
482 return phydrv->suspend(phydev); 485 return phy_suspend(phydev);
483} 486}
484 487
485static int mdio_bus_resume(struct device *dev) 488static int mdio_bus_resume(struct device *dev)
486{ 489{
487 struct phy_driver *phydrv = to_phy_driver(dev->driver);
488 struct phy_device *phydev = to_phy_device(dev); 490 struct phy_device *phydev = to_phy_device(dev);
489 int ret; 491 int ret;
490 492
491 if (!mdio_bus_phy_may_suspend(phydev)) 493 if (!mdio_bus_phy_may_suspend(phydev))
492 goto no_resume; 494 goto no_resume;
493 495
494 ret = phydrv->resume(phydev); 496 ret = phy_resume(phydev);
495 if (ret < 0) 497 if (ret < 0)
496 return ret; 498 return ret;
497 499
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 767cd110f496..cdcac6aa4260 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -439,6 +439,9 @@ int phy_start_aneg(struct phy_device *phydev)
439 if (AUTONEG_DISABLE == phydev->autoneg) 439 if (AUTONEG_DISABLE == phydev->autoneg)
440 phy_sanitize_settings(phydev); 440 phy_sanitize_settings(phydev);
441 441
442 /* Invalidate LP advertising flags */
443 phydev->lp_advertising = 0;
444
442 err = phydev->drv->config_aneg(phydev); 445 err = phydev->drv->config_aneg(phydev);
443 if (err < 0) 446 if (err < 0)
444 goto out_unlock; 447 goto out_unlock;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 3fc91e89f5a5..bdfe51fc3a65 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -699,6 +699,7 @@ int phy_suspend(struct phy_device *phydev)
699{ 699{
700 struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver); 700 struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
701 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 701 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
702 int ret = 0;
702 703
703 /* If the device has WOL enabled, we cannot suspend the PHY */ 704 /* If the device has WOL enabled, we cannot suspend the PHY */
704 phy_ethtool_get_wol(phydev, &wol); 705 phy_ethtool_get_wol(phydev, &wol);
@@ -706,18 +707,31 @@ int phy_suspend(struct phy_device *phydev)
706 return -EBUSY; 707 return -EBUSY;
707 708
708 if (phydrv->suspend) 709 if (phydrv->suspend)
709 return phydrv->suspend(phydev); 710 ret = phydrv->suspend(phydev);
710 return 0; 711
712 if (ret)
713 return ret;
714
715 phydev->suspended = true;
716
717 return ret;
711} 718}
712EXPORT_SYMBOL(phy_suspend); 719EXPORT_SYMBOL(phy_suspend);
713 720
714int phy_resume(struct phy_device *phydev) 721int phy_resume(struct phy_device *phydev)
715{ 722{
716 struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver); 723 struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
724 int ret = 0;
717 725
718 if (phydrv->resume) 726 if (phydrv->resume)
719 return phydrv->resume(phydev); 727 ret = phydrv->resume(phydev);
720 return 0; 728
729 if (ret)
730 return ret;
731
732 phydev->suspended = false;
733
734 return ret;
721} 735}
722EXPORT_SYMBOL(phy_resume); 736EXPORT_SYMBOL(phy_resume);
723 737
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index f7ff493f1e73..0e62274e884a 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -28,6 +28,7 @@
28#include <net/genetlink.h> 28#include <net/genetlink.h>
29#include <net/netlink.h> 29#include <net/netlink.h>
30#include <net/sch_generic.h> 30#include <net/sch_generic.h>
31#include <net/switchdev.h>
31#include <generated/utsrelease.h> 32#include <generated/utsrelease.h>
32#include <linux/if_team.h> 33#include <linux/if_team.h>
33 34
@@ -176,7 +177,6 @@ static int __team_option_inst_add(struct team *team, struct team_option *option,
176static int __team_option_inst_add_option(struct team *team, 177static int __team_option_inst_add_option(struct team *team,
177 struct team_option *option) 178 struct team_option *option)
178{ 179{
179 struct team_port *port;
180 int err; 180 int err;
181 181
182 if (!option->per_port) { 182 if (!option->per_port) {
@@ -184,12 +184,6 @@ static int __team_option_inst_add_option(struct team *team,
184 if (err) 184 if (err)
185 goto inst_del_option; 185 goto inst_del_option;
186 } 186 }
187
188 list_for_each_entry(port, &team->port_list, list) {
189 err = __team_option_inst_add(team, option, port);
190 if (err)
191 goto inst_del_option;
192 }
193 return 0; 187 return 0;
194 188
195inst_del_option: 189inst_del_option:
@@ -1932,7 +1926,7 @@ static netdev_features_t team_fix_features(struct net_device *dev,
1932 struct team *team = netdev_priv(dev); 1926 struct team *team = netdev_priv(dev);
1933 netdev_features_t mask; 1927 netdev_features_t mask;
1934 1928
1935 mask = features; 1929 mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
1936 features &= ~NETIF_F_ONE_FOR_ALL; 1930 features &= ~NETIF_F_ONE_FOR_ALL;
1937 features |= NETIF_F_ALL_FOR_ALL; 1931 features |= NETIF_F_ALL_FOR_ALL;
1938 1932
@@ -1982,6 +1976,8 @@ static const struct net_device_ops team_netdev_ops = {
1982 .ndo_del_slave = team_del_slave, 1976 .ndo_del_slave = team_del_slave,
1983 .ndo_fix_features = team_fix_features, 1977 .ndo_fix_features = team_fix_features,
1984 .ndo_change_carrier = team_change_carrier, 1978 .ndo_change_carrier = team_change_carrier,
1979 .ndo_bridge_setlink = ndo_dflt_netdev_switch_port_bridge_setlink,
1980 .ndo_bridge_dellink = ndo_dflt_netdev_switch_port_bridge_dellink,
1985}; 1981};
1986 1982
1987/*********************** 1983/***********************
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 10f9e4021b5a..857dca47bf80 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -123,10 +123,9 @@ struct tap_filter {
123 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; 123 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
124}; 124};
125 125
126/* DEFAULT_MAX_NUM_RSS_QUEUES were chosen to let the rx/tx queues allocated for 126/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
127 * the netdevice to be fit in one page. So we can make sure the success of 127 * to max number of VCPUs in guest. */
128 * memory allocation. TODO: increase the limit. */ 128#define MAX_TAP_QUEUES 256
129#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
130#define MAX_TAP_FLOWS 4096 129#define MAX_TAP_FLOWS 4096
131 130
132#define TUN_FLOW_EXPIRE (3 * HZ) 131#define TUN_FLOW_EXPIRE (3 * HZ)
@@ -257,7 +256,6 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
257{ 256{
258 tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n", 257 tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
259 e->rxhash, e->queue_index); 258 e->rxhash, e->queue_index);
260 sock_rps_reset_flow_hash(e->rps_rxhash);
261 hlist_del_rcu(&e->hash_link); 259 hlist_del_rcu(&e->hash_link);
262 kfree_rcu(e, rcu); 260 kfree_rcu(e, rcu);
263 --tun->flow_count; 261 --tun->flow_count;
@@ -374,10 +372,8 @@ unlock:
374 */ 372 */
375static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash) 373static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
376{ 374{
377 if (unlikely(e->rps_rxhash != hash)) { 375 if (unlikely(e->rps_rxhash != hash))
378 sock_rps_reset_flow_hash(e->rps_rxhash);
379 e->rps_rxhash = hash; 376 e->rps_rxhash = hash;
380 }
381} 377}
382 378
383/* We try to identify a flow through its rxhash first. The reason that 379/* We try to identify a flow through its rxhash first. The reason that
@@ -1247,7 +1243,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1247 int vlan_hlen = 0; 1243 int vlan_hlen = 0;
1248 int vnet_hdr_sz = 0; 1244 int vnet_hdr_sz = 0;
1249 1245
1250 if (vlan_tx_tag_present(skb)) 1246 if (skb_vlan_tag_present(skb))
1251 vlan_hlen = VLAN_HLEN; 1247 vlan_hlen = VLAN_HLEN;
1252 1248
1253 if (tun->flags & IFF_VNET_HDR) 1249 if (tun->flags & IFF_VNET_HDR)
@@ -1326,7 +1322,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1326 } veth; 1322 } veth;
1327 1323
1328 veth.h_vlan_proto = skb->vlan_proto; 1324 veth.h_vlan_proto = skb->vlan_proto;
1329 veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); 1325 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
1330 1326
1331 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 1327 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
1332 1328
@@ -1368,7 +1364,7 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1368 skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0, 1364 skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
1369 &peeked, &off, &err); 1365 &peeked, &off, &err);
1370 if (!skb) 1366 if (!skb)
1371 return 0; 1367 return err;
1372 1368
1373 ret = tun_put_user(tun, tfile, skb, to); 1369 ret = tun_put_user(tun, tfile, skb, to);
1374 if (unlikely(ret < 0)) 1370 if (unlikely(ret < 0))
@@ -1489,7 +1485,7 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
1489 goto out; 1485 goto out;
1490 } 1486 }
1491 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT); 1487 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT);
1492 if (ret > total_len) { 1488 if (ret > (ssize_t)total_len) {
1493 m->msg_flags |= MSG_TRUNC; 1489 m->msg_flags |= MSG_TRUNC;
1494 ret = flags & MSG_TRUNC ? ret : total_len; 1490 ret = flags & MSG_TRUNC ? ret : total_len;
1495 } 1491 }
@@ -1554,6 +1550,17 @@ static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
1554static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); 1550static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
1555static DEVICE_ATTR(group, 0444, tun_show_group, NULL); 1551static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
1556 1552
1553static struct attribute *tun_dev_attrs[] = {
1554 &dev_attr_tun_flags.attr,
1555 &dev_attr_owner.attr,
1556 &dev_attr_group.attr,
1557 NULL
1558};
1559
1560static const struct attribute_group tun_attr_group = {
1561 .attrs = tun_dev_attrs
1562};
1563
1557static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 1564static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1558{ 1565{
1559 struct tun_struct *tun; 1566 struct tun_struct *tun;
@@ -1634,6 +1641,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1634 dev_net_set(dev, net); 1641 dev_net_set(dev, net);
1635 dev->rtnl_link_ops = &tun_link_ops; 1642 dev->rtnl_link_ops = &tun_link_ops;
1636 dev->ifindex = tfile->ifindex; 1643 dev->ifindex = tfile->ifindex;
1644 dev->sysfs_groups[0] = &tun_attr_group;
1637 1645
1638 tun = netdev_priv(dev); 1646 tun = netdev_priv(dev);
1639 tun->dev = dev; 1647 tun->dev = dev;
@@ -1669,11 +1677,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1669 err = register_netdevice(tun->dev); 1677 err = register_netdevice(tun->dev);
1670 if (err < 0) 1678 if (err < 0)
1671 goto err_detach; 1679 goto err_detach;
1672
1673 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1674 device_create_file(&tun->dev->dev, &dev_attr_owner) ||
1675 device_create_file(&tun->dev->dev, &dev_attr_group))
1676 pr_err("Failed to create tun sysfs files\n");
1677 } 1680 }
1678 1681
1679 netif_carrier_on(tun->dev); 1682 netif_carrier_on(tun->dev);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 9c5aa922a9f4..6b8efcabb816 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -58,7 +58,6 @@
58#include <linux/module.h> 58#include <linux/module.h>
59#include <linux/ethtool.h> 59#include <linux/ethtool.h>
60#include <linux/usb.h> 60#include <linux/usb.h>
61#include <linux/timer.h>
62#include <linux/tty.h> 61#include <linux/tty.h>
63#include <linux/tty_driver.h> 62#include <linux/tty_driver.h>
64#include <linux/tty_flip.h> 63#include <linux/tty_flip.h>
@@ -154,6 +153,7 @@ struct hso_net {
154 struct hso_device *parent; 153 struct hso_device *parent;
155 struct net_device *net; 154 struct net_device *net;
156 struct rfkill *rfkill; 155 struct rfkill *rfkill;
156 char name[24];
157 157
158 struct usb_endpoint_descriptor *in_endp; 158 struct usb_endpoint_descriptor *in_endp;
159 struct usb_endpoint_descriptor *out_endp; 159 struct usb_endpoint_descriptor *out_endp;
@@ -274,7 +274,6 @@ struct hso_device {
274 u8 usb_gone; 274 u8 usb_gone;
275 struct work_struct async_get_intf; 275 struct work_struct async_get_intf;
276 struct work_struct async_put_intf; 276 struct work_struct async_put_intf;
277 struct work_struct reset_device;
278 277
279 struct usb_device *usb; 278 struct usb_device *usb;
280 struct usb_interface *interface; 279 struct usb_interface *interface;
@@ -340,7 +339,6 @@ static void async_put_intf(struct work_struct *data);
340static int hso_put_activity(struct hso_device *hso_dev); 339static int hso_put_activity(struct hso_device *hso_dev);
341static int hso_get_activity(struct hso_device *hso_dev); 340static int hso_get_activity(struct hso_device *hso_dev);
342static void tiocmget_intr_callback(struct urb *urb); 341static void tiocmget_intr_callback(struct urb *urb);
343static void reset_device(struct work_struct *data);
344/*****************************************************************************/ 342/*****************************************************************************/
345/* Helping functions */ 343/* Helping functions */
346/*****************************************************************************/ 344/*****************************************************************************/
@@ -533,6 +531,13 @@ static ssize_t hso_sysfs_show_porttype(struct device *dev,
533} 531}
534static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL); 532static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL);
535 533
534static struct attribute *hso_serial_dev_attrs[] = {
535 &dev_attr_hsotype.attr,
536 NULL
537};
538
539ATTRIBUTE_GROUPS(hso_serial_dev);
540
536static int hso_urb_to_index(struct hso_serial *serial, struct urb *urb) 541static int hso_urb_to_index(struct hso_serial *serial, struct urb *urb)
537{ 542{
538 int idx; 543 int idx;
@@ -696,7 +701,7 @@ static void handle_usb_error(int status, const char *function,
696 case -ETIMEDOUT: 701 case -ETIMEDOUT:
697 explanation = "protocol error"; 702 explanation = "protocol error";
698 if (hso_dev) 703 if (hso_dev)
699 schedule_work(&hso_dev->reset_device); 704 usb_queue_reset_device(hso_dev->interface);
700 break; 705 break;
701 default: 706 default:
702 explanation = "unknown status"; 707 explanation = "unknown status";
@@ -1271,7 +1276,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
1271 goto err_out; 1276 goto err_out;
1272 1277
1273 D1("Opening %d", serial->minor); 1278 D1("Opening %d", serial->minor);
1274 kref_get(&serial->parent->ref);
1275 1279
1276 /* setup */ 1280 /* setup */
1277 tty->driver_data = serial; 1281 tty->driver_data = serial;
@@ -1290,7 +1294,8 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
1290 if (result) { 1294 if (result) {
1291 hso_stop_serial_device(serial->parent); 1295 hso_stop_serial_device(serial->parent);
1292 serial->port.count--; 1296 serial->port.count--;
1293 kref_put(&serial->parent->ref, hso_serial_ref_free); 1297 } else {
1298 kref_get(&serial->parent->ref);
1294 } 1299 }
1295 } else { 1300 } else {
1296 D1("Port was already open"); 1301 D1("Port was already open");
@@ -1340,8 +1345,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
1340 usb_autopm_put_interface(serial->parent->interface); 1345 usb_autopm_put_interface(serial->parent->interface);
1341 1346
1342 mutex_unlock(&serial->parent->mutex); 1347 mutex_unlock(&serial->parent->mutex);
1343
1344 kref_put(&serial->parent->ref, hso_serial_ref_free);
1345} 1348}
1346 1349
1347/* close the requested serial port */ 1350/* close the requested serial port */
@@ -1392,6 +1395,16 @@ static int hso_serial_write_room(struct tty_struct *tty)
1392 return room; 1395 return room;
1393} 1396}
1394 1397
1398static void hso_serial_cleanup(struct tty_struct *tty)
1399{
1400 struct hso_serial *serial = tty->driver_data;
1401
1402 if (!serial)
1403 return;
1404
1405 kref_put(&serial->parent->ref, hso_serial_ref_free);
1406}
1407
1395/* setup the term */ 1408/* setup the term */
1396static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) 1409static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
1397{ 1410{
@@ -2198,8 +2211,8 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
2198 2211
2199 for (i = 0; i < serial->num_rx_urbs; i++) { 2212 for (i = 0; i < serial->num_rx_urbs; i++) {
2200 if (serial->rx_urb[i]) { 2213 if (serial->rx_urb[i]) {
2201 usb_kill_urb(serial->rx_urb[i]); 2214 usb_kill_urb(serial->rx_urb[i]);
2202 serial->rx_urb_filled[i] = 0; 2215 serial->rx_urb_filled[i] = 0;
2203 } 2216 }
2204 } 2217 }
2205 serial->curr_rx_urb_idx = 0; 2218 serial->curr_rx_urb_idx = 0;
@@ -2228,15 +2241,15 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
2228 return 0; 2241 return 0;
2229} 2242}
2230 2243
2244static void hso_serial_tty_unregister(struct hso_serial *serial)
2245{
2246 tty_unregister_device(tty_drv, serial->minor);
2247}
2248
2231static void hso_serial_common_free(struct hso_serial *serial) 2249static void hso_serial_common_free(struct hso_serial *serial)
2232{ 2250{
2233 int i; 2251 int i;
2234 2252
2235 if (serial->parent->dev)
2236 device_remove_file(serial->parent->dev, &dev_attr_hsotype);
2237
2238 tty_unregister_device(tty_drv, serial->minor);
2239
2240 for (i = 0; i < serial->num_rx_urbs; i++) { 2253 for (i = 0; i < serial->num_rx_urbs; i++) {
2241 /* unlink and free RX URB */ 2254 /* unlink and free RX URB */
2242 usb_free_urb(serial->rx_urb[i]); 2255 usb_free_urb(serial->rx_urb[i]);
@@ -2246,6 +2259,7 @@ static void hso_serial_common_free(struct hso_serial *serial)
2246 2259
2247 /* unlink and free TX URB */ 2260 /* unlink and free TX URB */
2248 usb_free_urb(serial->tx_urb); 2261 usb_free_urb(serial->tx_urb);
2262 kfree(serial->tx_buffer);
2249 kfree(serial->tx_data); 2263 kfree(serial->tx_data);
2250 tty_port_destroy(&serial->port); 2264 tty_port_destroy(&serial->port);
2251} 2265}
@@ -2264,11 +2278,10 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
2264 goto exit; 2278 goto exit;
2265 2279
2266 /* register our minor number */ 2280 /* register our minor number */
2267 serial->parent->dev = tty_port_register_device(&serial->port, tty_drv, 2281 serial->parent->dev = tty_port_register_device_attr(&serial->port,
2268 minor, &serial->parent->interface->dev); 2282 tty_drv, minor, &serial->parent->interface->dev,
2283 serial->parent, hso_serial_dev_groups);
2269 dev = serial->parent->dev; 2284 dev = serial->parent->dev;
2270 dev_set_drvdata(dev, serial->parent);
2271 i = device_create_file(dev, &dev_attr_hsotype);
2272 2285
2273 /* fill in specific data for later use */ 2286 /* fill in specific data for later use */
2274 serial->minor = minor; 2287 serial->minor = minor;
@@ -2316,6 +2329,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
2316 2329
2317 return 0; 2330 return 0;
2318exit: 2331exit:
2332 hso_serial_tty_unregister(serial);
2319 hso_serial_common_free(serial); 2333 hso_serial_common_free(serial);
2320 return -1; 2334 return -1;
2321} 2335}
@@ -2338,7 +2352,6 @@ static struct hso_device *hso_create_device(struct usb_interface *intf,
2338 2352
2339 INIT_WORK(&hso_dev->async_get_intf, async_get_intf); 2353 INIT_WORK(&hso_dev->async_get_intf, async_get_intf);
2340 INIT_WORK(&hso_dev->async_put_intf, async_put_intf); 2354 INIT_WORK(&hso_dev->async_put_intf, async_put_intf);
2341 INIT_WORK(&hso_dev->reset_device, reset_device);
2342 2355
2343 return hso_dev; 2356 return hso_dev;
2344} 2357}
@@ -2459,27 +2472,21 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
2459{ 2472{
2460 struct hso_net *hso_net = dev2net(hso_dev); 2473 struct hso_net *hso_net = dev2net(hso_dev);
2461 struct device *dev = &hso_net->net->dev; 2474 struct device *dev = &hso_net->net->dev;
2462 char *rfkn; 2475 static u32 rfkill_counter;
2463 2476
2464 rfkn = kzalloc(20, GFP_KERNEL); 2477 snprintf(hso_net->name, sizeof(hso_net->name), "hso-%d",
2465 if (!rfkn) 2478 rfkill_counter++);
2466 dev_err(dev, "%s - Out of memory\n", __func__);
2467
2468 snprintf(rfkn, 20, "hso-%d",
2469 interface->altsetting->desc.bInterfaceNumber);
2470 2479
2471 hso_net->rfkill = rfkill_alloc(rfkn, 2480 hso_net->rfkill = rfkill_alloc(hso_net->name,
2472 &interface_to_usbdev(interface)->dev, 2481 &interface_to_usbdev(interface)->dev,
2473 RFKILL_TYPE_WWAN, 2482 RFKILL_TYPE_WWAN,
2474 &hso_rfkill_ops, hso_dev); 2483 &hso_rfkill_ops, hso_dev);
2475 if (!hso_net->rfkill) { 2484 if (!hso_net->rfkill) {
2476 dev_err(dev, "%s - Out of memory\n", __func__); 2485 dev_err(dev, "%s - Out of memory\n", __func__);
2477 kfree(rfkn);
2478 return; 2486 return;
2479 } 2487 }
2480 if (rfkill_register(hso_net->rfkill) < 0) { 2488 if (rfkill_register(hso_net->rfkill) < 0) {
2481 rfkill_destroy(hso_net->rfkill); 2489 rfkill_destroy(hso_net->rfkill);
2482 kfree(rfkn);
2483 hso_net->rfkill = NULL; 2490 hso_net->rfkill = NULL;
2484 dev_err(dev, "%s - Failed to register rfkill\n", __func__); 2491 dev_err(dev, "%s - Failed to register rfkill\n", __func__);
2485 return; 2492 return;
@@ -2594,7 +2601,6 @@ static void hso_free_serial_device(struct hso_device *hso_dev)
2594 2601
2595 if (!serial) 2602 if (!serial)
2596 return; 2603 return;
2597 set_serial_by_index(serial->minor, NULL);
2598 2604
2599 hso_serial_common_free(serial); 2605 hso_serial_common_free(serial);
2600 2606
@@ -2684,6 +2690,7 @@ static struct hso_device *hso_create_bulk_serial_device(
2684 return hso_dev; 2690 return hso_dev;
2685 2691
2686exit2: 2692exit2:
2693 hso_serial_tty_unregister(serial);
2687 hso_serial_common_free(serial); 2694 hso_serial_common_free(serial);
2688exit: 2695exit:
2689 hso_free_tiomget(serial); 2696 hso_free_tiomget(serial);
@@ -3083,26 +3090,6 @@ out:
3083 return result; 3090 return result;
3084} 3091}
3085 3092
3086static void reset_device(struct work_struct *data)
3087{
3088 struct hso_device *hso_dev =
3089 container_of(data, struct hso_device, reset_device);
3090 struct usb_device *usb = hso_dev->usb;
3091 int result;
3092
3093 if (hso_dev->usb_gone) {
3094 D1("No reset during disconnect\n");
3095 } else {
3096 result = usb_lock_device_for_reset(usb, hso_dev->interface);
3097 if (result < 0)
3098 D1("unable to lock device for reset: %d\n", result);
3099 else {
3100 usb_reset_device(usb);
3101 usb_unlock_device(usb);
3102 }
3103 }
3104}
3105
3106static void hso_serial_ref_free(struct kref *ref) 3093static void hso_serial_ref_free(struct kref *ref)
3107{ 3094{
3108 struct hso_device *hso_dev = container_of(ref, struct hso_device, ref); 3095 struct hso_device *hso_dev = container_of(ref, struct hso_device, ref);
@@ -3112,18 +3099,22 @@ static void hso_serial_ref_free(struct kref *ref)
3112 3099
3113static void hso_free_interface(struct usb_interface *interface) 3100static void hso_free_interface(struct usb_interface *interface)
3114{ 3101{
3115 struct hso_serial *hso_dev; 3102 struct hso_serial *serial;
3116 int i; 3103 int i;
3117 3104
3118 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { 3105 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
3119 if (serial_table[i] && 3106 if (serial_table[i] &&
3120 (serial_table[i]->interface == interface)) { 3107 (serial_table[i]->interface == interface)) {
3121 hso_dev = dev2ser(serial_table[i]); 3108 serial = dev2ser(serial_table[i]);
3122 tty_port_tty_hangup(&hso_dev->port, false); 3109 tty_port_tty_hangup(&serial->port, false);
3123 mutex_lock(&hso_dev->parent->mutex); 3110 mutex_lock(&serial->parent->mutex);
3124 hso_dev->parent->usb_gone = 1; 3111 serial->parent->usb_gone = 1;
3125 mutex_unlock(&hso_dev->parent->mutex); 3112 mutex_unlock(&serial->parent->mutex);
3113 cancel_work_sync(&serial_table[i]->async_put_intf);
3114 cancel_work_sync(&serial_table[i]->async_get_intf);
3115 hso_serial_tty_unregister(serial);
3126 kref_put(&serial_table[i]->ref, hso_serial_ref_free); 3116 kref_put(&serial_table[i]->ref, hso_serial_ref_free);
3117 set_serial_by_index(i, NULL);
3127 } 3118 }
3128 } 3119 }
3129 3120
@@ -3215,6 +3206,7 @@ static const struct tty_operations hso_serial_ops = {
3215 .close = hso_serial_close, 3206 .close = hso_serial_close,
3216 .write = hso_serial_write, 3207 .write = hso_serial_write,
3217 .write_room = hso_serial_write_room, 3208 .write_room = hso_serial_write_room,
3209 .cleanup = hso_serial_cleanup,
3218 .ioctl = hso_serial_ioctl, 3210 .ioctl = hso_serial_ioctl,
3219 .set_termios = hso_serial_set_termios, 3211 .set_termios = hso_serial_set_termios,
3220 .chars_in_buffer = hso_serial_chars_in_buffer, 3212 .chars_in_buffer = hso_serial_chars_in_buffer,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index bf405f134d3a..5980ac6c48dd 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -27,7 +27,7 @@
27#include <linux/usb/cdc.h> 27#include <linux/usb/cdc.h>
28 28
29/* Version Information */ 29/* Version Information */
30#define DRIVER_VERSION "v1.07.0 (2014/10/09)" 30#define DRIVER_VERSION "v1.08.0 (2015/01/13)"
31#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 31#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
32#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" 32#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
33#define MODULENAME "r8152" 33#define MODULENAME "r8152"
@@ -448,6 +448,7 @@ enum rtl_register_content {
448#define RTL8152_RMS (VLAN_ETH_FRAME_LEN + VLAN_HLEN) 448#define RTL8152_RMS (VLAN_ETH_FRAME_LEN + VLAN_HLEN)
449#define RTL8153_RMS RTL8153_MAX_PACKET 449#define RTL8153_RMS RTL8153_MAX_PACKET
450#define RTL8152_TX_TIMEOUT (5 * HZ) 450#define RTL8152_TX_TIMEOUT (5 * HZ)
451#define RTL8152_NAPI_WEIGHT 64
451 452
452/* rtl8152 flags */ 453/* rtl8152 flags */
453enum rtl8152_flags { 454enum rtl8152_flags {
@@ -457,7 +458,7 @@ enum rtl8152_flags {
457 RTL8152_LINK_CHG, 458 RTL8152_LINK_CHG,
458 SELECTIVE_SUSPEND, 459 SELECTIVE_SUSPEND,
459 PHY_RESET, 460 PHY_RESET,
460 SCHEDULE_TASKLET, 461 SCHEDULE_NAPI,
461}; 462};
462 463
463/* Define these values to match your device */ 464/* Define these values to match your device */
@@ -488,16 +489,16 @@ struct rx_desc {
488#define RX_LEN_MASK 0x7fff 489#define RX_LEN_MASK 0x7fff
489 490
490 __le32 opts2; 491 __le32 opts2;
491#define RD_UDP_CS (1 << 23) 492#define RD_UDP_CS BIT(23)
492#define RD_TCP_CS (1 << 22) 493#define RD_TCP_CS BIT(22)
493#define RD_IPV6_CS (1 << 20) 494#define RD_IPV6_CS BIT(20)
494#define RD_IPV4_CS (1 << 19) 495#define RD_IPV4_CS BIT(19)
495 496
496 __le32 opts3; 497 __le32 opts3;
497#define IPF (1 << 23) /* IP checksum fail */ 498#define IPF BIT(23) /* IP checksum fail */
498#define UDPF (1 << 22) /* UDP checksum fail */ 499#define UDPF BIT(22) /* UDP checksum fail */
499#define TCPF (1 << 21) /* TCP checksum fail */ 500#define TCPF BIT(21) /* TCP checksum fail */
500#define RX_VLAN_TAG (1 << 16) 501#define RX_VLAN_TAG BIT(16)
501 502
502 __le32 opts4; 503 __le32 opts4;
503 __le32 opts5; 504 __le32 opts5;
@@ -506,24 +507,24 @@ struct rx_desc {
506 507
507struct tx_desc { 508struct tx_desc {
508 __le32 opts1; 509 __le32 opts1;
509#define TX_FS (1 << 31) /* First segment of a packet */ 510#define TX_FS BIT(31) /* First segment of a packet */
510#define TX_LS (1 << 30) /* Final segment of a packet */ 511#define TX_LS BIT(30) /* Final segment of a packet */
511#define GTSENDV4 (1 << 28) 512#define GTSENDV4 BIT(28)
512#define GTSENDV6 (1 << 27) 513#define GTSENDV6 BIT(27)
513#define GTTCPHO_SHIFT 18 514#define GTTCPHO_SHIFT 18
514#define GTTCPHO_MAX 0x7fU 515#define GTTCPHO_MAX 0x7fU
515#define TX_LEN_MAX 0x3ffffU 516#define TX_LEN_MAX 0x3ffffU
516 517
517 __le32 opts2; 518 __le32 opts2;
518#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */ 519#define UDP_CS BIT(31) /* Calculate UDP/IP checksum */
519#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */ 520#define TCP_CS BIT(30) /* Calculate TCP/IP checksum */
520#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */ 521#define IPV4_CS BIT(29) /* Calculate IPv4 checksum */
521#define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */ 522#define IPV6_CS BIT(28) /* Calculate IPv6 checksum */
522#define MSS_SHIFT 17 523#define MSS_SHIFT 17
523#define MSS_MAX 0x7ffU 524#define MSS_MAX 0x7ffU
524#define TCPHO_SHIFT 17 525#define TCPHO_SHIFT 17
525#define TCPHO_MAX 0x7ffU 526#define TCPHO_MAX 0x7ffU
526#define TX_VLAN_TAG (1 << 16) 527#define TX_VLAN_TAG BIT(16)
527}; 528};
528 529
529struct r8152; 530struct r8152;
@@ -549,14 +550,14 @@ struct tx_agg {
549struct r8152 { 550struct r8152 {
550 unsigned long flags; 551 unsigned long flags;
551 struct usb_device *udev; 552 struct usb_device *udev;
552 struct tasklet_struct tl; 553 struct napi_struct napi;
553 struct usb_interface *intf; 554 struct usb_interface *intf;
554 struct net_device *netdev; 555 struct net_device *netdev;
555 struct urb *intr_urb; 556 struct urb *intr_urb;
556 struct tx_agg tx_info[RTL8152_MAX_TX]; 557 struct tx_agg tx_info[RTL8152_MAX_TX];
557 struct rx_agg rx_info[RTL8152_MAX_RX]; 558 struct rx_agg rx_info[RTL8152_MAX_RX];
558 struct list_head rx_done, tx_free; 559 struct list_head rx_done, tx_free;
559 struct sk_buff_head tx_queue; 560 struct sk_buff_head tx_queue, rx_queue;
560 spinlock_t rx_lock, tx_lock; 561 spinlock_t rx_lock, tx_lock;
561 struct delayed_work schedule; 562 struct delayed_work schedule;
562 struct mii_if_info mii; 563 struct mii_if_info mii;
@@ -580,7 +581,6 @@ struct r8152 {
580 u16 ocp_base; 581 u16 ocp_base;
581 u8 *intr_buff; 582 u8 *intr_buff;
582 u8 version; 583 u8 version;
583 u8 speed;
584}; 584};
585 585
586enum rtl_version { 586enum rtl_version {
@@ -1050,7 +1050,7 @@ static void read_bulk_callback(struct urb *urb)
1050 spin_lock(&tp->rx_lock); 1050 spin_lock(&tp->rx_lock);
1051 list_add_tail(&agg->list, &tp->rx_done); 1051 list_add_tail(&agg->list, &tp->rx_done);
1052 spin_unlock(&tp->rx_lock); 1052 spin_unlock(&tp->rx_lock);
1053 tasklet_schedule(&tp->tl); 1053 napi_schedule(&tp->napi);
1054 return; 1054 return;
1055 case -ESHUTDOWN: 1055 case -ESHUTDOWN:
1056 set_bit(RTL8152_UNPLUG, &tp->flags); 1056 set_bit(RTL8152_UNPLUG, &tp->flags);
@@ -1114,7 +1114,7 @@ static void write_bulk_callback(struct urb *urb)
1114 return; 1114 return;
1115 1115
1116 if (!skb_queue_empty(&tp->tx_queue)) 1116 if (!skb_queue_empty(&tp->tx_queue))
1117 tasklet_schedule(&tp->tl); 1117 napi_schedule(&tp->napi);
1118} 1118}
1119 1119
1120static void intr_callback(struct urb *urb) 1120static void intr_callback(struct urb *urb)
@@ -1156,12 +1156,12 @@ static void intr_callback(struct urb *urb)
1156 1156
1157 d = urb->transfer_buffer; 1157 d = urb->transfer_buffer;
1158 if (INTR_LINK & __le16_to_cpu(d[0])) { 1158 if (INTR_LINK & __le16_to_cpu(d[0])) {
1159 if (!(tp->speed & LINK_STATUS)) { 1159 if (!netif_carrier_ok(tp->netdev)) {
1160 set_bit(RTL8152_LINK_CHG, &tp->flags); 1160 set_bit(RTL8152_LINK_CHG, &tp->flags);
1161 schedule_delayed_work(&tp->schedule, 0); 1161 schedule_delayed_work(&tp->schedule, 0);
1162 } 1162 }
1163 } else { 1163 } else {
1164 if (tp->speed & LINK_STATUS) { 1164 if (netif_carrier_ok(tp->netdev)) {
1165 set_bit(RTL8152_LINK_CHG, &tp->flags); 1165 set_bit(RTL8152_LINK_CHG, &tp->flags);
1166 schedule_delayed_work(&tp->schedule, 0); 1166 schedule_delayed_work(&tp->schedule, 0);
1167 } 1167 }
@@ -1233,6 +1233,7 @@ static int alloc_all_mem(struct r8152 *tp)
1233 spin_lock_init(&tp->tx_lock); 1233 spin_lock_init(&tp->tx_lock);
1234 INIT_LIST_HEAD(&tp->tx_free); 1234 INIT_LIST_HEAD(&tp->tx_free);
1235 skb_queue_head_init(&tp->tx_queue); 1235 skb_queue_head_init(&tp->tx_queue);
1236 skb_queue_head_init(&tp->rx_queue);
1236 1237
1237 for (i = 0; i < RTL8152_MAX_RX; i++) { 1238 for (i = 0; i < RTL8152_MAX_RX; i++) {
1238 buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); 1239 buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node);
@@ -1329,18 +1330,6 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
1329 return agg; 1330 return agg;
1330} 1331}
1331 1332
1332static inline __be16 get_protocol(struct sk_buff *skb)
1333{
1334 __be16 protocol;
1335
1336 if (skb->protocol == htons(ETH_P_8021Q))
1337 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
1338 else
1339 protocol = skb->protocol;
1340
1341 return protocol;
1342}
1343
1344/* r8152_csum_workaround() 1333/* r8152_csum_workaround()
1345 * The hw limites the value the transport offset. When the offset is out of the 1334 * The hw limites the value the transport offset. When the offset is out of the
1346 * range, calculate the checksum by sw. 1335 * range, calculate the checksum by sw.
@@ -1409,10 +1398,10 @@ static int msdn_giant_send_check(struct sk_buff *skb)
1409 1398
1410static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb) 1399static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb)
1411{ 1400{
1412 if (vlan_tx_tag_present(skb)) { 1401 if (skb_vlan_tag_present(skb)) {
1413 u32 opts2; 1402 u32 opts2;
1414 1403
1415 opts2 = TX_VLAN_TAG | swab16(vlan_tx_tag_get(skb)); 1404 opts2 = TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb));
1416 desc->opts2 |= cpu_to_le32(opts2); 1405 desc->opts2 |= cpu_to_le32(opts2);
1417 } 1406 }
1418} 1407}
@@ -1446,7 +1435,7 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
1446 goto unavailable; 1435 goto unavailable;
1447 } 1436 }
1448 1437
1449 switch (get_protocol(skb)) { 1438 switch (vlan_get_protocol(skb)) {
1450 case htons(ETH_P_IP): 1439 case htons(ETH_P_IP):
1451 opts1 |= GTSENDV4; 1440 opts1 |= GTSENDV4;
1452 break; 1441 break;
@@ -1477,7 +1466,7 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
1477 goto unavailable; 1466 goto unavailable;
1478 } 1467 }
1479 1468
1480 switch (get_protocol(skb)) { 1469 switch (vlan_get_protocol(skb)) {
1481 case htons(ETH_P_IP): 1470 case htons(ETH_P_IP):
1482 opts2 |= IPV4_CS; 1471 opts2 |= IPV4_CS;
1483 ip_protocol = ip_hdr(skb)->protocol; 1472 ip_protocol = ip_hdr(skb)->protocol;
@@ -1637,13 +1626,32 @@ return_result:
1637 return checksum; 1626 return checksum;
1638} 1627}
1639 1628
1640static void rx_bottom(struct r8152 *tp) 1629static int rx_bottom(struct r8152 *tp, int budget)
1641{ 1630{
1642 unsigned long flags; 1631 unsigned long flags;
1643 struct list_head *cursor, *next, rx_queue; 1632 struct list_head *cursor, *next, rx_queue;
1633 int ret = 0, work_done = 0;
1634
1635 if (!skb_queue_empty(&tp->rx_queue)) {
1636 while (work_done < budget) {
1637 struct sk_buff *skb = __skb_dequeue(&tp->rx_queue);
1638 struct net_device *netdev = tp->netdev;
1639 struct net_device_stats *stats = &netdev->stats;
1640 unsigned int pkt_len;
1641
1642 if (!skb)
1643 break;
1644
1645 pkt_len = skb->len;
1646 napi_gro_receive(&tp->napi, skb);
1647 work_done++;
1648 stats->rx_packets++;
1649 stats->rx_bytes += pkt_len;
1650 }
1651 }
1644 1652
1645 if (list_empty(&tp->rx_done)) 1653 if (list_empty(&tp->rx_done))
1646 return; 1654 goto out1;
1647 1655
1648 INIT_LIST_HEAD(&rx_queue); 1656 INIT_LIST_HEAD(&rx_queue);
1649 spin_lock_irqsave(&tp->rx_lock, flags); 1657 spin_lock_irqsave(&tp->rx_lock, flags);
@@ -1696,9 +1704,14 @@ static void rx_bottom(struct r8152 *tp)
1696 skb_put(skb, pkt_len); 1704 skb_put(skb, pkt_len);
1697 skb->protocol = eth_type_trans(skb, netdev); 1705 skb->protocol = eth_type_trans(skb, netdev);
1698 rtl_rx_vlan_tag(rx_desc, skb); 1706 rtl_rx_vlan_tag(rx_desc, skb);
1699 netif_receive_skb(skb); 1707 if (work_done < budget) {
1700 stats->rx_packets++; 1708 napi_gro_receive(&tp->napi, skb);
1701 stats->rx_bytes += pkt_len; 1709 work_done++;
1710 stats->rx_packets++;
1711 stats->rx_bytes += pkt_len;
1712 } else {
1713 __skb_queue_tail(&tp->rx_queue, skb);
1714 }
1702 1715
1703find_next_rx: 1716find_next_rx:
1704 rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE); 1717 rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
@@ -1708,8 +1721,22 @@ find_next_rx:
1708 } 1721 }
1709 1722
1710submit: 1723submit:
1711 r8152_submit_rx(tp, agg, GFP_ATOMIC); 1724 if (!ret) {
1725 ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
1726 } else {
1727 urb->actual_length = 0;
1728 list_add_tail(&agg->list, next);
1729 }
1712 } 1730 }
1731
1732 if (!list_empty(&rx_queue)) {
1733 spin_lock_irqsave(&tp->rx_lock, flags);
1734 list_splice_tail(&rx_queue, &tp->rx_done);
1735 spin_unlock_irqrestore(&tp->rx_lock, flags);
1736 }
1737
1738out1:
1739 return work_done;
1713} 1740}
1714 1741
1715static void tx_bottom(struct r8152 *tp) 1742static void tx_bottom(struct r8152 *tp)
@@ -1749,12 +1776,8 @@ static void tx_bottom(struct r8152 *tp)
1749 } while (res == 0); 1776 } while (res == 0);
1750} 1777}
1751 1778
1752static void bottom_half(unsigned long data) 1779static void bottom_half(struct r8152 *tp)
1753{ 1780{
1754 struct r8152 *tp;
1755
1756 tp = (struct r8152 *)data;
1757
1758 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 1781 if (test_bit(RTL8152_UNPLUG, &tp->flags))
1759 return; 1782 return;
1760 1783
@@ -1766,17 +1789,38 @@ static void bottom_half(unsigned long data)
1766 if (!netif_carrier_ok(tp->netdev)) 1789 if (!netif_carrier_ok(tp->netdev))
1767 return; 1790 return;
1768 1791
1769 clear_bit(SCHEDULE_TASKLET, &tp->flags); 1792 clear_bit(SCHEDULE_NAPI, &tp->flags);
1770 1793
1771 rx_bottom(tp);
1772 tx_bottom(tp); 1794 tx_bottom(tp);
1773} 1795}
1774 1796
1797static int r8152_poll(struct napi_struct *napi, int budget)
1798{
1799 struct r8152 *tp = container_of(napi, struct r8152, napi);
1800 int work_done;
1801
1802 work_done = rx_bottom(tp, budget);
1803 bottom_half(tp);
1804
1805 if (work_done < budget) {
1806 napi_complete(napi);
1807 if (!list_empty(&tp->rx_done))
1808 napi_schedule(napi);
1809 }
1810
1811 return work_done;
1812}
1813
1775static 1814static
1776int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) 1815int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
1777{ 1816{
1778 int ret; 1817 int ret;
1779 1818
1819 /* The rx would be stopped, so skip submitting */
1820 if (test_bit(RTL8152_UNPLUG, &tp->flags) ||
1821 !test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev))
1822 return 0;
1823
1780 usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), 1824 usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
1781 agg->head, agg_buf_sz, 1825 agg->head, agg_buf_sz,
1782 (usb_complete_t)read_bulk_callback, agg); 1826 (usb_complete_t)read_bulk_callback, agg);
@@ -1793,7 +1837,11 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
1793 spin_lock_irqsave(&tp->rx_lock, flags); 1837 spin_lock_irqsave(&tp->rx_lock, flags);
1794 list_add_tail(&agg->list, &tp->rx_done); 1838 list_add_tail(&agg->list, &tp->rx_done);
1795 spin_unlock_irqrestore(&tp->rx_lock, flags); 1839 spin_unlock_irqrestore(&tp->rx_lock, flags);
1796 tasklet_schedule(&tp->tl); 1840
1841 netif_err(tp, rx_err, tp->netdev,
1842 "Couldn't submit rx[%p], ret = %d\n", agg, ret);
1843
1844 napi_schedule(&tp->napi);
1797 } 1845 }
1798 1846
1799 return ret; 1847 return ret;
@@ -1833,7 +1881,7 @@ static void rtl8152_set_rx_mode(struct net_device *netdev)
1833{ 1881{
1834 struct r8152 *tp = netdev_priv(netdev); 1882 struct r8152 *tp = netdev_priv(netdev);
1835 1883
1836 if (tp->speed & LINK_STATUS) { 1884 if (netif_carrier_ok(netdev)) {
1837 set_bit(RTL8152_SET_RX_MODE, &tp->flags); 1885 set_bit(RTL8152_SET_RX_MODE, &tp->flags);
1838 schedule_delayed_work(&tp->schedule, 0); 1886 schedule_delayed_work(&tp->schedule, 0);
1839 } 1887 }
@@ -1912,11 +1960,11 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
1912 1960
1913 if (!list_empty(&tp->tx_free)) { 1961 if (!list_empty(&tp->tx_free)) {
1914 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 1962 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
1915 set_bit(SCHEDULE_TASKLET, &tp->flags); 1963 set_bit(SCHEDULE_NAPI, &tp->flags);
1916 schedule_delayed_work(&tp->schedule, 0); 1964 schedule_delayed_work(&tp->schedule, 0);
1917 } else { 1965 } else {
1918 usb_mark_last_busy(tp->udev); 1966 usb_mark_last_busy(tp->udev);
1919 tasklet_schedule(&tp->tl); 1967 napi_schedule(&tp->napi);
1920 } 1968 }
1921 } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) { 1969 } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) {
1922 netif_stop_queue(netdev); 1970 netif_stop_queue(netdev);
@@ -1995,6 +2043,7 @@ static int rtl_start_rx(struct r8152 *tp)
1995{ 2043{
1996 int i, ret = 0; 2044 int i, ret = 0;
1997 2045
2046 napi_disable(&tp->napi);
1998 INIT_LIST_HEAD(&tp->rx_done); 2047 INIT_LIST_HEAD(&tp->rx_done);
1999 for (i = 0; i < RTL8152_MAX_RX; i++) { 2048 for (i = 0; i < RTL8152_MAX_RX; i++) {
2000 INIT_LIST_HEAD(&tp->rx_info[i].list); 2049 INIT_LIST_HEAD(&tp->rx_info[i].list);
@@ -2002,6 +2051,7 @@ static int rtl_start_rx(struct r8152 *tp)
2002 if (ret) 2051 if (ret)
2003 break; 2052 break;
2004 } 2053 }
2054 napi_enable(&tp->napi);
2005 2055
2006 if (ret && ++i < RTL8152_MAX_RX) { 2056 if (ret && ++i < RTL8152_MAX_RX) {
2007 struct list_head rx_queue; 2057 struct list_head rx_queue;
@@ -2032,6 +2082,9 @@ static int rtl_stop_rx(struct r8152 *tp)
2032 for (i = 0; i < RTL8152_MAX_RX; i++) 2082 for (i = 0; i < RTL8152_MAX_RX; i++)
2033 usb_kill_urb(tp->rx_info[i].urb); 2083 usb_kill_urb(tp->rx_info[i].urb);
2034 2084
2085 while (!skb_queue_empty(&tp->rx_queue))
2086 dev_kfree_skb(__skb_dequeue(&tp->rx_queue));
2087
2035 return 0; 2088 return 0;
2036} 2089}
2037 2090
@@ -2047,7 +2100,7 @@ static int rtl_enable(struct r8152 *tp)
2047 2100
2048 rxdy_gated_en(tp, false); 2101 rxdy_gated_en(tp, false);
2049 2102
2050 return rtl_start_rx(tp); 2103 return 0;
2051} 2104}
2052 2105
2053static int rtl8152_enable(struct r8152 *tp) 2106static int rtl8152_enable(struct r8152 *tp)
@@ -2852,20 +2905,20 @@ static void set_carrier(struct r8152 *tp)
2852 speed = rtl8152_get_speed(tp); 2905 speed = rtl8152_get_speed(tp);
2853 2906
2854 if (speed & LINK_STATUS) { 2907 if (speed & LINK_STATUS) {
2855 if (!(tp->speed & LINK_STATUS)) { 2908 if (!netif_carrier_ok(netdev)) {
2856 tp->rtl_ops.enable(tp); 2909 tp->rtl_ops.enable(tp);
2857 set_bit(RTL8152_SET_RX_MODE, &tp->flags); 2910 set_bit(RTL8152_SET_RX_MODE, &tp->flags);
2858 netif_carrier_on(netdev); 2911 netif_carrier_on(netdev);
2912 rtl_start_rx(tp);
2859 } 2913 }
2860 } else { 2914 } else {
2861 if (tp->speed & LINK_STATUS) { 2915 if (netif_carrier_ok(netdev)) {
2862 netif_carrier_off(netdev); 2916 netif_carrier_off(netdev);
2863 tasklet_disable(&tp->tl); 2917 napi_disable(&tp->napi);
2864 tp->rtl_ops.disable(tp); 2918 tp->rtl_ops.disable(tp);
2865 tasklet_enable(&tp->tl); 2919 napi_enable(&tp->napi);
2866 } 2920 }
2867 } 2921 }
2868 tp->speed = speed;
2869} 2922}
2870 2923
2871static void rtl_work_func_t(struct work_struct *work) 2924static void rtl_work_func_t(struct work_struct *work)
@@ -2895,10 +2948,11 @@ static void rtl_work_func_t(struct work_struct *work)
2895 if (test_bit(RTL8152_SET_RX_MODE, &tp->flags)) 2948 if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
2896 _rtl8152_set_rx_mode(tp->netdev); 2949 _rtl8152_set_rx_mode(tp->netdev);
2897 2950
2898 if (test_bit(SCHEDULE_TASKLET, &tp->flags) && 2951 /* don't schedule napi before linking */
2899 (tp->speed & LINK_STATUS)) { 2952 if (test_bit(SCHEDULE_NAPI, &tp->flags) &&
2900 clear_bit(SCHEDULE_TASKLET, &tp->flags); 2953 netif_carrier_ok(tp->netdev)) {
2901 tasklet_schedule(&tp->tl); 2954 clear_bit(SCHEDULE_NAPI, &tp->flags);
2955 napi_schedule(&tp->napi);
2902 } 2956 }
2903 2957
2904 if (test_bit(PHY_RESET, &tp->flags)) 2958 if (test_bit(PHY_RESET, &tp->flags))
@@ -2919,8 +2973,7 @@ static int rtl8152_open(struct net_device *netdev)
2919 if (res) 2973 if (res)
2920 goto out; 2974 goto out;
2921 2975
2922 /* set speed to 0 to avoid autoresume try to submit rx */ 2976 netif_carrier_off(netdev);
2923 tp->speed = 0;
2924 2977
2925 res = usb_autopm_get_interface(tp->intf); 2978 res = usb_autopm_get_interface(tp->intf);
2926 if (res < 0) { 2979 if (res < 0) {
@@ -2937,7 +2990,7 @@ static int rtl8152_open(struct net_device *netdev)
2937 cancel_delayed_work_sync(&tp->schedule); 2990 cancel_delayed_work_sync(&tp->schedule);
2938 2991
2939 /* disable the tx/rx, if the workqueue has enabled them. */ 2992 /* disable the tx/rx, if the workqueue has enabled them. */
2940 if (tp->speed & LINK_STATUS) 2993 if (netif_carrier_ok(netdev))
2941 tp->rtl_ops.disable(tp); 2994 tp->rtl_ops.disable(tp);
2942 } 2995 }
2943 2996
@@ -2946,7 +2999,6 @@ static int rtl8152_open(struct net_device *netdev)
2946 rtl8152_set_speed(tp, AUTONEG_ENABLE, 2999 rtl8152_set_speed(tp, AUTONEG_ENABLE,
2947 tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, 3000 tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
2948 DUPLEX_FULL); 3001 DUPLEX_FULL);
2949 tp->speed = 0;
2950 netif_carrier_off(netdev); 3002 netif_carrier_off(netdev);
2951 netif_start_queue(netdev); 3003 netif_start_queue(netdev);
2952 set_bit(WORK_ENABLE, &tp->flags); 3004 set_bit(WORK_ENABLE, &tp->flags);
@@ -2959,7 +3011,7 @@ static int rtl8152_open(struct net_device *netdev)
2959 res); 3011 res);
2960 free_all_mem(tp); 3012 free_all_mem(tp);
2961 } else { 3013 } else {
2962 tasklet_enable(&tp->tl); 3014 napi_enable(&tp->napi);
2963 } 3015 }
2964 3016
2965 mutex_unlock(&tp->control); 3017 mutex_unlock(&tp->control);
@@ -2975,15 +3027,16 @@ static int rtl8152_close(struct net_device *netdev)
2975 struct r8152 *tp = netdev_priv(netdev); 3027 struct r8152 *tp = netdev_priv(netdev);
2976 int res = 0; 3028 int res = 0;
2977 3029
2978 tasklet_disable(&tp->tl); 3030 napi_disable(&tp->napi);
2979 clear_bit(WORK_ENABLE, &tp->flags); 3031 clear_bit(WORK_ENABLE, &tp->flags);
2980 usb_kill_urb(tp->intr_urb); 3032 usb_kill_urb(tp->intr_urb);
2981 cancel_delayed_work_sync(&tp->schedule); 3033 cancel_delayed_work_sync(&tp->schedule);
2982 netif_stop_queue(netdev); 3034 netif_stop_queue(netdev);
2983 3035
2984 res = usb_autopm_get_interface(tp->intf); 3036 res = usb_autopm_get_interface(tp->intf);
2985 if (res < 0) { 3037 if (res < 0 || test_bit(RTL8152_UNPLUG, &tp->flags)) {
2986 rtl_drop_queued_tx(tp); 3038 rtl_drop_queued_tx(tp);
3039 rtl_stop_rx(tp);
2987 } else { 3040 } else {
2988 mutex_lock(&tp->control); 3041 mutex_lock(&tp->control);
2989 3042
@@ -3187,10 +3240,10 @@ static void r8153_init(struct r8152 *tp)
3187 3240
3188 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL); 3241 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL);
3189 ocp_data &= ~LPM_TIMER_MASK; 3242 ocp_data &= ~LPM_TIMER_MASK;
3190 if (tp->udev->speed == USB_SPEED_SUPER) 3243 if (tp->version == RTL_VER_04 && tp->udev->speed != USB_SPEED_SUPER)
3191 ocp_data |= LPM_TIMER_500US;
3192 else
3193 ocp_data |= LPM_TIMER_500MS; 3244 ocp_data |= LPM_TIMER_500MS;
3245 else
3246 ocp_data |= LPM_TIMER_500US;
3194 ocp_write_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL, ocp_data); 3247 ocp_write_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL, ocp_data);
3195 3248
3196 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2); 3249 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2);
@@ -3239,7 +3292,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
3239 if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { 3292 if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
3240 clear_bit(WORK_ENABLE, &tp->flags); 3293 clear_bit(WORK_ENABLE, &tp->flags);
3241 usb_kill_urb(tp->intr_urb); 3294 usb_kill_urb(tp->intr_urb);
3242 tasklet_disable(&tp->tl); 3295 napi_disable(&tp->napi);
3243 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3296 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3244 rtl_stop_rx(tp); 3297 rtl_stop_rx(tp);
3245 rtl_runtime_suspend_enable(tp, true); 3298 rtl_runtime_suspend_enable(tp, true);
@@ -3247,7 +3300,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
3247 cancel_delayed_work_sync(&tp->schedule); 3300 cancel_delayed_work_sync(&tp->schedule);
3248 tp->rtl_ops.down(tp); 3301 tp->rtl_ops.down(tp);
3249 } 3302 }
3250 tasklet_enable(&tp->tl); 3303 napi_enable(&tp->napi);
3251 } 3304 }
3252out1: 3305out1:
3253 mutex_unlock(&tp->control); 3306 mutex_unlock(&tp->control);
@@ -3271,7 +3324,7 @@ static int rtl8152_resume(struct usb_interface *intf)
3271 rtl_runtime_suspend_enable(tp, false); 3324 rtl_runtime_suspend_enable(tp, false);
3272 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3325 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3273 set_bit(WORK_ENABLE, &tp->flags); 3326 set_bit(WORK_ENABLE, &tp->flags);
3274 if (tp->speed & LINK_STATUS) 3327 if (netif_carrier_ok(tp->netdev))
3275 rtl_start_rx(tp); 3328 rtl_start_rx(tp);
3276 } else { 3329 } else {
3277 tp->rtl_ops.up(tp); 3330 tp->rtl_ops.up(tp);
@@ -3279,7 +3332,6 @@ static int rtl8152_resume(struct usb_interface *intf)
3279 tp->mii.supports_gmii ? 3332 tp->mii.supports_gmii ?
3280 SPEED_1000 : SPEED_100, 3333 SPEED_1000 : SPEED_100,
3281 DUPLEX_FULL); 3334 DUPLEX_FULL);
3282 tp->speed = 0;
3283 netif_carrier_off(tp->netdev); 3335 netif_carrier_off(tp->netdev);
3284 set_bit(WORK_ENABLE, &tp->flags); 3336 set_bit(WORK_ENABLE, &tp->flags);
3285 } 3337 }
@@ -3831,7 +3883,6 @@ static int rtl8152_probe(struct usb_interface *intf,
3831 if (ret) 3883 if (ret)
3832 goto out; 3884 goto out;
3833 3885
3834 tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
3835 mutex_init(&tp->control); 3886 mutex_init(&tp->control);
3836 INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); 3887 INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
3837 3888
@@ -3845,8 +3896,7 @@ static int rtl8152_probe(struct usb_interface *intf,
3845 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | 3896 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
3846 NETIF_F_TSO | NETIF_F_FRAGLIST | 3897 NETIF_F_TSO | NETIF_F_FRAGLIST |
3847 NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | 3898 NETIF_F_IPV6_CSUM | NETIF_F_TSO6 |
3848 NETIF_F_HW_VLAN_CTAG_RX | 3899 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
3849 NETIF_F_HW_VLAN_CTAG_TX;
3850 netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 3900 netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
3851 NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | 3901 NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
3852 NETIF_F_IPV6_CSUM | NETIF_F_TSO6; 3902 NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
@@ -3867,6 +3917,7 @@ static int rtl8152_probe(struct usb_interface *intf,
3867 set_ethernet_addr(tp); 3917 set_ethernet_addr(tp);
3868 3918
3869 usb_set_intfdata(intf, tp); 3919 usb_set_intfdata(intf, tp);
3920 netif_napi_add(netdev, &tp->napi, r8152_poll, RTL8152_NAPI_WEIGHT);
3870 3921
3871 ret = register_netdev(netdev); 3922 ret = register_netdev(netdev);
3872 if (ret != 0) { 3923 if (ret != 0) {
@@ -3880,15 +3931,13 @@ static int rtl8152_probe(struct usb_interface *intf,
3880 else 3931 else
3881 device_set_wakeup_enable(&udev->dev, false); 3932 device_set_wakeup_enable(&udev->dev, false);
3882 3933
3883 tasklet_disable(&tp->tl);
3884
3885 netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION); 3934 netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
3886 3935
3887 return 0; 3936 return 0;
3888 3937
3889out1: 3938out1:
3939 netif_napi_del(&tp->napi);
3890 usb_set_intfdata(intf, NULL); 3940 usb_set_intfdata(intf, NULL);
3891 tasklet_kill(&tp->tl);
3892out: 3941out:
3893 free_netdev(netdev); 3942 free_netdev(netdev);
3894 return ret; 3943 return ret;
@@ -3905,7 +3954,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
3905 if (udev->state == USB_STATE_NOTATTACHED) 3954 if (udev->state == USB_STATE_NOTATTACHED)
3906 set_bit(RTL8152_UNPLUG, &tp->flags); 3955 set_bit(RTL8152_UNPLUG, &tp->flags);
3907 3956
3908 tasklet_kill(&tp->tl); 3957 netif_napi_del(&tp->napi);
3909 unregister_netdev(tp->netdev); 3958 unregister_netdev(tp->netdev);
3910 tp->rtl_ops.unload(tp); 3959 tp->rtl_ops.unload(tp);
3911 free_netdev(tp->netdev); 3960 free_netdev(tp->netdev);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3a6770a65d78..449835f4331e 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -160,20 +160,19 @@ EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
160 160
161int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) 161int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
162{ 162{
163 int tmp, i; 163 int tmp = -1, ret;
164 unsigned char buf [13]; 164 unsigned char buf [13];
165 165
166 tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf); 166 ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
167 if (tmp != 12) { 167 if (ret == 12)
168 tmp = hex2bin(dev->net->dev_addr, buf, 6);
169 if (tmp < 0) {
168 dev_dbg(&dev->udev->dev, 170 dev_dbg(&dev->udev->dev,
169 "bad MAC string %d fetch, %d\n", iMACAddress, tmp); 171 "bad MAC string %d fetch, %d\n", iMACAddress, tmp);
170 if (tmp >= 0) 172 if (ret >= 0)
171 tmp = -EINVAL; 173 ret = -EINVAL;
172 return tmp; 174 return ret;
173 } 175 }
174 for (i = tmp = 0; i < 6; i++, tmp += 2)
175 dev->net->dev_addr [i] =
176 (hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]);
177 return 0; 176 return 0;
178} 177}
179EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); 178EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8ad596573d17..4cca36ebc4fb 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -469,6 +469,14 @@ static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
469 [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) }, 469 [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
470}; 470};
471 471
472static struct net *veth_get_link_net(const struct net_device *dev)
473{
474 struct veth_priv *priv = netdev_priv(dev);
475 struct net_device *peer = rtnl_dereference(priv->peer);
476
477 return peer ? dev_net(peer) : dev_net(dev);
478}
479
472static struct rtnl_link_ops veth_link_ops = { 480static struct rtnl_link_ops veth_link_ops = {
473 .kind = DRV_NAME, 481 .kind = DRV_NAME,
474 .priv_size = sizeof(struct veth_priv), 482 .priv_size = sizeof(struct veth_priv),
@@ -478,6 +486,7 @@ static struct rtnl_link_ops veth_link_ops = {
478 .dellink = veth_dellink, 486 .dellink = veth_dellink,
479 .policy = veth_policy, 487 .policy = veth_policy,
480 .maxtype = VETH_INFO_MAX, 488 .maxtype = VETH_INFO_MAX,
489 .get_link_net = veth_get_link_net,
481}; 490};
482 491
483/* 492/*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 059fdf1bf5ee..110a2cf67244 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -918,6 +918,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
918 /* Free up any pending old buffers before queueing new ones. */ 918 /* Free up any pending old buffers before queueing new ones. */
919 free_old_xmit_skbs(sq); 919 free_old_xmit_skbs(sq);
920 920
921 /* timestamp packet in software */
922 skb_tx_timestamp(skb);
923
921 /* Try to transmit */ 924 /* Try to transmit */
922 err = xmit_skb(sq, skb); 925 err = xmit_skb(sq, skb);
923 926
@@ -1369,6 +1372,7 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
1369 .get_ringparam = virtnet_get_ringparam, 1372 .get_ringparam = virtnet_get_ringparam,
1370 .set_channels = virtnet_set_channels, 1373 .set_channels = virtnet_set_channels,
1371 .get_channels = virtnet_get_channels, 1374 .get_channels = virtnet_get_channels,
1375 .get_ts_info = ethtool_op_get_ts_info,
1372}; 1376};
1373 1377
1374#define MIN_MTU 68 1378#define MIN_MTU 68
@@ -1754,6 +1758,8 @@ static int virtnet_probe(struct virtio_device *vdev)
1754 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) 1758 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
1755 dev->hw_features |= NETIF_F_UFO; 1759 dev->hw_features |= NETIF_F_UFO;
1756 1760
1761 dev->features |= NETIF_F_GSO_ROBUST;
1762
1757 if (gso) 1763 if (gso)
1758 dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); 1764 dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
1759 /* (!csum && gso) case will be fixed by register_netdev() */ 1765 /* (!csum && gso) case will be fixed by register_netdev() */
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 4d84912c99ba..3718d024f638 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -342,6 +342,7 @@ union Vmxnet3_GenericDesc {
342#define VMXNET3_TX_RING_MAX_SIZE 4096 342#define VMXNET3_TX_RING_MAX_SIZE 4096
343#define VMXNET3_TC_RING_MAX_SIZE 4096 343#define VMXNET3_TC_RING_MAX_SIZE 4096
344#define VMXNET3_RX_RING_MAX_SIZE 4096 344#define VMXNET3_RX_RING_MAX_SIZE 4096
345#define VMXNET3_RX_RING2_MAX_SIZE 2048
345#define VMXNET3_RC_RING_MAX_SIZE 8192 346#define VMXNET3_RC_RING_MAX_SIZE 8192
346 347
347/* a list of reasons for queue stop */ 348/* a list of reasons for queue stop */
@@ -392,7 +393,7 @@ struct Vmxnet3_DriverInfo {
392}; 393};
393 394
394 395
395#define VMXNET3_REV1_MAGIC 0xbabefee1 396#define VMXNET3_REV1_MAGIC 3133079265u
396 397
397/* 398/*
398 * QueueDescPA must be 128 bytes aligned. It points to an array of 399 * QueueDescPA must be 128 bytes aligned. It points to an array of
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index afd295348ddb..294214c15292 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1038,9 +1038,9 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1038 le32_add_cpu(&tq->shared->txNumDeferred, 1); 1038 le32_add_cpu(&tq->shared->txNumDeferred, 1);
1039 } 1039 }
1040 1040
1041 if (vlan_tx_tag_present(skb)) { 1041 if (skb_vlan_tag_present(skb)) {
1042 gdesc->txd.ti = 1; 1042 gdesc->txd.ti = 1;
1043 gdesc->txd.tci = vlan_tx_tag_get(skb); 1043 gdesc->txd.tci = skb_vlan_tag_get(skb);
1044 } 1044 }
1045 1045
1046 /* finally flips the GEN bit of the SOP desc. */ 1046 /* finally flips the GEN bit of the SOP desc. */
@@ -2505,6 +2505,9 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2505 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE / 2505 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
2506 sz * sz); 2506 sz * sz);
2507 ring1_size = adapter->rx_queue[0].rx_ring[1].size; 2507 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2508 ring1_size = (ring1_size + sz - 1) / sz * sz;
2509 ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
2510 sz * sz);
2508 comp_size = ring0_size + ring1_size; 2511 comp_size = ring0_size + ring1_size;
2509 2512
2510 for (i = 0; i < adapter->num_rx_queues; i++) { 2513 for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -2585,7 +2588,7 @@ vmxnet3_open(struct net_device *netdev)
2585 2588
2586 err = vmxnet3_create_queues(adapter, adapter->tx_ring_size, 2589 err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
2587 adapter->rx_ring_size, 2590 adapter->rx_ring_size,
2588 VMXNET3_DEF_RX_RING_SIZE); 2591 adapter->rx_ring2_size);
2589 if (err) 2592 if (err)
2590 goto queue_err; 2593 goto queue_err;
2591 2594
@@ -2964,6 +2967,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2964 2967
2965 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE; 2968 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
2966 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; 2969 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
2970 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
2967 2971
2968 spin_lock_init(&adapter->cmd_lock); 2972 spin_lock_init(&adapter->cmd_lock);
2969 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, 2973 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
@@ -3286,27 +3290,15 @@ skip_arp:
3286static int 3290static int
3287vmxnet3_resume(struct device *device) 3291vmxnet3_resume(struct device *device)
3288{ 3292{
3289 int err, i = 0; 3293 int err;
3290 unsigned long flags; 3294 unsigned long flags;
3291 struct pci_dev *pdev = to_pci_dev(device); 3295 struct pci_dev *pdev = to_pci_dev(device);
3292 struct net_device *netdev = pci_get_drvdata(pdev); 3296 struct net_device *netdev = pci_get_drvdata(pdev);
3293 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3297 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3294 struct Vmxnet3_PMConf *pmConf;
3295 3298
3296 if (!netif_running(netdev)) 3299 if (!netif_running(netdev))
3297 return 0; 3300 return 0;
3298 3301
3299 /* Destroy wake-up filters. */
3300 pmConf = adapter->pm_conf;
3301 memset(pmConf, 0, sizeof(*pmConf));
3302
3303 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3304 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3305 *pmConf));
3306 adapter->shared->devRead.pmConfDesc.confPA =
3307 cpu_to_le64(adapter->pm_conf_pa);
3308
3309 netif_device_attach(netdev);
3310 pci_set_power_state(pdev, PCI_D0); 3302 pci_set_power_state(pdev, PCI_D0);
3311 pci_restore_state(pdev); 3303 pci_restore_state(pdev);
3312 err = pci_enable_device_mem(pdev); 3304 err = pci_enable_device_mem(pdev);
@@ -3315,15 +3307,31 @@ vmxnet3_resume(struct device *device)
3315 3307
3316 pci_enable_wake(pdev, PCI_D0, 0); 3308 pci_enable_wake(pdev, PCI_D0, 0);
3317 3309
3310 vmxnet3_alloc_intr_resources(adapter);
3311
3312 /* During hibernate and suspend, device has to be reinitialized as the
3313 * device state need not be preserved.
3314 */
3315
3316 /* Need not check adapter state as other reset tasks cannot run during
3317 * device resume.
3318 */
3318 spin_lock_irqsave(&adapter->cmd_lock, flags); 3319 spin_lock_irqsave(&adapter->cmd_lock, flags);
3319 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 3320 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3320 VMXNET3_CMD_UPDATE_PMCFG); 3321 VMXNET3_CMD_QUIESCE_DEV);
3321 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 3322 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3322 vmxnet3_alloc_intr_resources(adapter); 3323 vmxnet3_tq_cleanup_all(adapter);
3323 vmxnet3_request_irqs(adapter); 3324 vmxnet3_rq_cleanup_all(adapter);
3324 for (i = 0; i < adapter->num_rx_queues; i++) 3325
3325 napi_enable(&adapter->rx_queue[i].napi); 3326 vmxnet3_reset_dev(adapter);
3326 vmxnet3_enable_all_intrs(adapter); 3327 err = vmxnet3_activate_dev(adapter);
3328 if (err != 0) {
3329 netdev_err(netdev,
3330 "failed to re-activate on resume, error: %d", err);
3331 vmxnet3_force_close(adapter);
3332 return err;
3333 }
3334 netif_device_attach(netdev);
3327 3335
3328 return 0; 3336 return 0;
3329} 3337}
@@ -3331,6 +3339,8 @@ vmxnet3_resume(struct device *device)
3331static const struct dev_pm_ops vmxnet3_pm_ops = { 3339static const struct dev_pm_ops vmxnet3_pm_ops = {
3332 .suspend = vmxnet3_suspend, 3340 .suspend = vmxnet3_suspend,
3333 .resume = vmxnet3_resume, 3341 .resume = vmxnet3_resume,
3342 .freeze = vmxnet3_suspend,
3343 .restore = vmxnet3_resume,
3334}; 3344};
3335#endif 3345#endif
3336 3346
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index b7b53329d575..4c8a944d58b4 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -323,7 +323,7 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
323 vmxnet3_tq_driver_stats[i].offset); 323 vmxnet3_tq_driver_stats[i].offset);
324 } 324 }
325 325
326 for (j = 0; j < adapter->num_tx_queues; j++) { 326 for (j = 0; j < adapter->num_rx_queues; j++) {
327 base = (u8 *)&adapter->rqd_start[j].stats; 327 base = (u8 *)&adapter->rqd_start[j].stats;
328 *buf++ = (u64) j; 328 *buf++ = (u64) j;
329 for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) 329 for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
@@ -447,12 +447,12 @@ vmxnet3_get_ringparam(struct net_device *netdev,
447 param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE; 447 param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
448 param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE; 448 param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
449 param->rx_mini_max_pending = 0; 449 param->rx_mini_max_pending = 0;
450 param->rx_jumbo_max_pending = 0; 450 param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;
451 451
452 param->rx_pending = adapter->rx_ring_size; 452 param->rx_pending = adapter->rx_ring_size;
453 param->tx_pending = adapter->tx_ring_size; 453 param->tx_pending = adapter->tx_ring_size;
454 param->rx_mini_pending = 0; 454 param->rx_mini_pending = 0;
455 param->rx_jumbo_pending = 0; 455 param->rx_jumbo_pending = adapter->rx_ring2_size;
456} 456}
457 457
458 458
@@ -461,7 +461,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
461 struct ethtool_ringparam *param) 461 struct ethtool_ringparam *param)
462{ 462{
463 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 463 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
464 u32 new_tx_ring_size, new_rx_ring_size; 464 u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
465 u32 sz; 465 u32 sz;
466 int err = 0; 466 int err = 0;
467 467
@@ -473,6 +473,10 @@ vmxnet3_set_ringparam(struct net_device *netdev,
473 VMXNET3_RX_RING_MAX_SIZE) 473 VMXNET3_RX_RING_MAX_SIZE)
474 return -EINVAL; 474 return -EINVAL;
475 475
476 if (param->rx_jumbo_pending == 0 ||
477 param->rx_jumbo_pending > VMXNET3_RX_RING2_MAX_SIZE)
478 return -EINVAL;
479
476 /* if adapter not yet initialized, do nothing */ 480 /* if adapter not yet initialized, do nothing */
477 if (adapter->rx_buf_per_pkt == 0) { 481 if (adapter->rx_buf_per_pkt == 0) {
478 netdev_err(netdev, "adapter not completely initialized, " 482 netdev_err(netdev, "adapter not completely initialized, "
@@ -500,8 +504,15 @@ vmxnet3_set_ringparam(struct net_device *netdev,
500 sz) != 0) 504 sz) != 0)
501 return -EINVAL; 505 return -EINVAL;
502 506
503 if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size && 507 /* ring2 has to be a multiple of VMXNET3_RING_SIZE_ALIGN */
504 new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) { 508 new_rx_ring2_size = (param->rx_jumbo_pending + VMXNET3_RING_SIZE_MASK) &
509 ~VMXNET3_RING_SIZE_MASK;
510 new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
511 VMXNET3_RX_RING2_MAX_SIZE);
512
513 if (new_tx_ring_size == adapter->tx_ring_size &&
514 new_rx_ring_size == adapter->rx_ring_size &&
515 new_rx_ring2_size == adapter->rx_ring2_size) {
505 return 0; 516 return 0;
506 } 517 }
507 518
@@ -522,7 +533,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
522 vmxnet3_rq_destroy_all(adapter); 533 vmxnet3_rq_destroy_all(adapter);
523 534
524 err = vmxnet3_create_queues(adapter, new_tx_ring_size, 535 err = vmxnet3_create_queues(adapter, new_tx_ring_size,
525 new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE); 536 new_rx_ring_size, new_rx_ring2_size);
526 537
527 if (err) { 538 if (err) {
528 /* failed, most likely because of OOM, try default 539 /* failed, most likely because of OOM, try default
@@ -530,11 +541,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
530 netdev_err(netdev, "failed to apply new sizes, " 541 netdev_err(netdev, "failed to apply new sizes, "
531 "try the default ones\n"); 542 "try the default ones\n");
532 new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; 543 new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
544 new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
533 new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE; 545 new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
534 err = vmxnet3_create_queues(adapter, 546 err = vmxnet3_create_queues(adapter,
535 new_tx_ring_size, 547 new_tx_ring_size,
536 new_rx_ring_size, 548 new_rx_ring_size,
537 VMXNET3_DEF_RX_RING_SIZE); 549 new_rx_ring2_size);
538 if (err) { 550 if (err) {
539 netdev_err(netdev, "failed to create queues " 551 netdev_err(netdev, "failed to create queues "
540 "with default sizes. Closing it\n"); 552 "with default sizes. Closing it\n");
@@ -549,6 +561,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
549 } 561 }
550 adapter->tx_ring_size = new_tx_ring_size; 562 adapter->tx_ring_size = new_tx_ring_size;
551 adapter->rx_ring_size = new_rx_ring_size; 563 adapter->rx_ring_size = new_rx_ring_size;
564 adapter->rx_ring2_size = new_rx_ring2_size;
552 565
553out: 566out:
554 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 567 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 5f0199f6c31e..cd71c77f78f2 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.2.1.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.3.4.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01020100 75#define VMXNET3_DRIVER_VERSION_NUM 0x01030400
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
@@ -352,6 +352,7 @@ struct vmxnet3_adapter {
352 /* Ring sizes */ 352 /* Ring sizes */
353 u32 tx_ring_size; 353 u32 tx_ring_size;
354 u32 rx_ring_size; 354 u32 rx_ring_size;
355 u32 rx_ring2_size;
355 356
356 struct work_struct work; 357 struct work_struct work;
357 358
@@ -384,6 +385,7 @@ struct vmxnet3_adapter {
384/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */ 385/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
385#define VMXNET3_DEF_TX_RING_SIZE 512 386#define VMXNET3_DEF_TX_RING_SIZE 512
386#define VMXNET3_DEF_RX_RING_SIZE 256 387#define VMXNET3_DEF_RX_RING_SIZE 256
388#define VMXNET3_DEF_RX_RING2_SIZE 128
387 389
388#define VMXNET3_MAX_ETH_HDR_SIZE 22 390#define VMXNET3_MAX_ETH_HDR_SIZE 22
389#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024) 391#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a8c755dcab14..0e57e862c399 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -61,12 +61,6 @@
61#define FDB_AGE_DEFAULT 300 /* 5 min */ 61#define FDB_AGE_DEFAULT 300 /* 5 min */
62#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */ 62#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
63 63
64#define VXLAN_N_VID (1u << 24)
65#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
66#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
67
68#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
69
70/* UDP port for VXLAN traffic. 64/* UDP port for VXLAN traffic.
71 * The IANA assigned port is 4789, but the Linux default is 8472 65 * The IANA assigned port is 4789, but the Linux default is 8472
72 * for compatibility with early adopters. 66 * for compatibility with early adopters.
@@ -269,15 +263,20 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
269 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); 263 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
270} 264}
271 265
272/* Find VXLAN socket based on network namespace, address family and UDP port */ 266/* Find VXLAN socket based on network namespace, address family and UDP port
273static struct vxlan_sock *vxlan_find_sock(struct net *net, 267 * and enabled unshareable flags.
274 sa_family_t family, __be16 port) 268 */
269static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
270 __be16 port, u32 flags)
275{ 271{
276 struct vxlan_sock *vs; 272 struct vxlan_sock *vs;
277 273
274 flags &= VXLAN_F_RCV_FLAGS;
275
278 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { 276 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
279 if (inet_sk(vs->sock->sk)->inet_sport == port && 277 if (inet_sk(vs->sock->sk)->inet_sport == port &&
280 inet_sk(vs->sock->sk)->sk.sk_family == family) 278 inet_sk(vs->sock->sk)->sk.sk_family == family &&
279 vs->flags == flags)
281 return vs; 280 return vs;
282 } 281 }
283 return NULL; 282 return NULL;
@@ -297,11 +296,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
297 296
298/* Look up VNI in a per net namespace table */ 297/* Look up VNI in a per net namespace table */
299static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, 298static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
300 sa_family_t family, __be16 port) 299 sa_family_t family, __be16 port,
300 u32 flags)
301{ 301{
302 struct vxlan_sock *vs; 302 struct vxlan_sock *vs;
303 303
304 vs = vxlan_find_sock(net, family, port); 304 vs = vxlan_find_sock(net, family, port, flags);
305 if (!vs) 305 if (!vs)
306 return NULL; 306 return NULL;
307 307
@@ -340,6 +340,11 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
340 ndm->ndm_flags = fdb->flags; 340 ndm->ndm_flags = fdb->flags;
341 ndm->ndm_type = RTN_UNICAST; 341 ndm->ndm_type = RTN_UNICAST;
342 342
343 if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
344 nla_put_s32(skb, NDA_LINK_NETNSID,
345 peernet2id(dev_net(vxlan->dev), vxlan->net)))
346 goto nla_put_failure;
347
343 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr)) 348 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
344 goto nla_put_failure; 349 goto nla_put_failure;
345 350
@@ -364,7 +369,8 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
364 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) 369 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
365 goto nla_put_failure; 370 goto nla_put_failure;
366 371
367 return nlmsg_end(skb, nlh); 372 nlmsg_end(skb, nlh);
373 return 0;
368 374
369nla_put_failure: 375nla_put_failure:
370 nlmsg_cancel(skb, nlh); 376 nlmsg_cancel(skb, nlh);
@@ -379,6 +385,7 @@ static inline size_t vxlan_nlmsg_size(void)
379 + nla_total_size(sizeof(__be16)) /* NDA_PORT */ 385 + nla_total_size(sizeof(__be16)) /* NDA_PORT */
380 + nla_total_size(sizeof(__be32)) /* NDA_VNI */ 386 + nla_total_size(sizeof(__be32)) /* NDA_VNI */
381 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */ 387 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
388 + nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
382 + nla_total_size(sizeof(struct nda_cacheinfo)); 389 + nla_total_size(sizeof(struct nda_cacheinfo));
383} 390}
384 391
@@ -545,15 +552,51 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
545 return 1; 552 return 1;
546} 553}
547 554
548static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb) 555static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
556 unsigned int off,
557 struct vxlanhdr *vh, size_t hdrlen,
558 u32 data)
559{
560 size_t start, offset, plen;
561
562 if (skb->remcsum_offload)
563 return vh;
564
565 if (!NAPI_GRO_CB(skb)->csum_valid)
566 return NULL;
567
568 start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
569 offset = start + ((data & VXLAN_RCO_UDP) ?
570 offsetof(struct udphdr, check) :
571 offsetof(struct tcphdr, check));
572
573 plen = hdrlen + offset + sizeof(u16);
574
575 /* Pull checksum that will be written */
576 if (skb_gro_header_hard(skb, off + plen)) {
577 vh = skb_gro_header_slow(skb, off + plen, off);
578 if (!vh)
579 return NULL;
580 }
581
582 skb_gro_remcsum_process(skb, (void *)vh + hdrlen, start, offset);
583
584 skb->remcsum_offload = 1;
585
586 return vh;
587}
588
589static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
590 struct sk_buff *skb,
591 struct udp_offload *uoff)
549{ 592{
550 struct sk_buff *p, **pp = NULL; 593 struct sk_buff *p, **pp = NULL;
551 struct vxlanhdr *vh, *vh2; 594 struct vxlanhdr *vh, *vh2;
552 struct ethhdr *eh, *eh2; 595 unsigned int hlen, off_vx;
553 unsigned int hlen, off_vx, off_eth;
554 const struct packet_offload *ptype;
555 __be16 type;
556 int flush = 1; 596 int flush = 1;
597 struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
598 udp_offloads);
599 u32 flags;
557 600
558 off_vx = skb_gro_offset(skb); 601 off_vx = skb_gro_offset(skb);
559 hlen = off_vx + sizeof(*vh); 602 hlen = off_vx + sizeof(*vh);
@@ -563,15 +606,17 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
563 if (unlikely(!vh)) 606 if (unlikely(!vh))
564 goto out; 607 goto out;
565 } 608 }
609
566 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ 610 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
567 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr)); 611 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
568 612
569 off_eth = skb_gro_offset(skb); 613 flags = ntohl(vh->vx_flags);
570 hlen = off_eth + sizeof(*eh); 614
571 eh = skb_gro_header_fast(skb, off_eth); 615 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
572 if (skb_gro_header_hard(skb, hlen)) { 616 vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
573 eh = skb_gro_header_slow(skb, hlen, off_eth); 617 ntohl(vh->vx_vni));
574 if (unlikely(!eh)) 618
619 if (!vh)
575 goto out; 620 goto out;
576 } 621 }
577 622
@@ -582,54 +627,27 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
582 continue; 627 continue;
583 628
584 vh2 = (struct vxlanhdr *)(p->data + off_vx); 629 vh2 = (struct vxlanhdr *)(p->data + off_vx);
585 eh2 = (struct ethhdr *)(p->data + off_eth); 630 if (vh->vx_flags != vh2->vx_flags ||
586 if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) { 631 vh->vx_vni != vh2->vx_vni) {
587 NAPI_GRO_CB(p)->same_flow = 0; 632 NAPI_GRO_CB(p)->same_flow = 0;
588 continue; 633 continue;
589 } 634 }
590 } 635 }
591 636
592 type = eh->h_proto; 637 pp = eth_gro_receive(head, skb);
593
594 rcu_read_lock();
595 ptype = gro_find_receive_by_type(type);
596 if (ptype == NULL) {
597 flush = 1;
598 goto out_unlock;
599 }
600 638
601 skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
602 skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
603 pp = ptype->callbacks.gro_receive(head, skb);
604
605out_unlock:
606 rcu_read_unlock();
607out: 639out:
608 NAPI_GRO_CB(skb)->flush |= flush; 640 NAPI_GRO_CB(skb)->flush |= flush;
609 641
610 return pp; 642 return pp;
611} 643}
612 644
613static int vxlan_gro_complete(struct sk_buff *skb, int nhoff) 645static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
646 struct udp_offload *uoff)
614{ 647{
615 struct ethhdr *eh;
616 struct packet_offload *ptype;
617 __be16 type;
618 int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
619 int err = -ENOSYS;
620
621 udp_tunnel_gro_complete(skb, nhoff); 648 udp_tunnel_gro_complete(skb, nhoff);
622 649
623 eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr)); 650 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
624 type = eh->h_proto;
625
626 rcu_read_lock();
627 ptype = gro_find_complete_by_type(type);
628 if (ptype != NULL)
629 err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len);
630
631 rcu_read_unlock();
632 return err;
633} 651}
634 652
635/* Notify netdevs that UDP port started listening */ 653/* Notify netdevs that UDP port started listening */
@@ -991,7 +1009,7 @@ static bool vxlan_snoop(struct net_device *dev,
991 if (net_ratelimit()) 1009 if (net_ratelimit())
992 netdev_info(dev, 1010 netdev_info(dev,
993 "%pM migrated from %pIS to %pIS\n", 1011 "%pM migrated from %pIS to %pIS\n",
994 src_mac, &rdst->remote_ip, &src_ip); 1012 src_mac, &rdst->remote_ip.sa, &src_ip->sa);
995 1013
996 rdst->remote_ip = *src_ip; 1014 rdst->remote_ip = *src_ip;
997 f->updated = jiffies; 1015 f->updated = jiffies;
@@ -1131,33 +1149,107 @@ static void vxlan_igmp_leave(struct work_struct *work)
1131 dev_put(vxlan->dev); 1149 dev_put(vxlan->dev);
1132} 1150}
1133 1151
1152static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
1153 size_t hdrlen, u32 data)
1154{
1155 size_t start, offset, plen;
1156
1157 if (skb->remcsum_offload) {
1158 /* Already processed in GRO path */
1159 skb->remcsum_offload = 0;
1160 return vh;
1161 }
1162
1163 start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
1164 offset = start + ((data & VXLAN_RCO_UDP) ?
1165 offsetof(struct udphdr, check) :
1166 offsetof(struct tcphdr, check));
1167
1168 plen = hdrlen + offset + sizeof(u16);
1169
1170 if (!pskb_may_pull(skb, plen))
1171 return NULL;
1172
1173 vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
1174
1175 skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset);
1176
1177 return vh;
1178}
1179
1134/* Callback from net/ipv4/udp.c to receive packets */ 1180/* Callback from net/ipv4/udp.c to receive packets */
1135static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) 1181static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1136{ 1182{
1137 struct vxlan_sock *vs; 1183 struct vxlan_sock *vs;
1138 struct vxlanhdr *vxh; 1184 struct vxlanhdr *vxh;
1185 u32 flags, vni;
1186 struct vxlan_metadata md = {0};
1139 1187
1140 /* Need Vxlan and inner Ethernet header to be present */ 1188 /* Need Vxlan and inner Ethernet header to be present */
1141 if (!pskb_may_pull(skb, VXLAN_HLEN)) 1189 if (!pskb_may_pull(skb, VXLAN_HLEN))
1142 goto error; 1190 goto error;
1143 1191
1144 /* Return packets with reserved bits set */
1145 vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1); 1192 vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
1146 if (vxh->vx_flags != htonl(VXLAN_FLAGS) || 1193 flags = ntohl(vxh->vx_flags);
1147 (vxh->vx_vni & htonl(0xff))) { 1194 vni = ntohl(vxh->vx_vni);
1148 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", 1195
1149 ntohl(vxh->vx_flags), ntohl(vxh->vx_vni)); 1196 if (flags & VXLAN_HF_VNI) {
1150 goto error; 1197 flags &= ~VXLAN_HF_VNI;
1198 } else {
1199 /* VNI flag always required to be set */
1200 goto bad_flags;
1151 } 1201 }
1152 1202
1153 if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB))) 1203 if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
1154 goto drop; 1204 goto drop;
1205 vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
1155 1206
1156 vs = rcu_dereference_sk_user_data(sk); 1207 vs = rcu_dereference_sk_user_data(sk);
1157 if (!vs) 1208 if (!vs)
1158 goto drop; 1209 goto drop;
1159 1210
1160 vs->rcv(vs, skb, vxh->vx_vni); 1211 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
1212 vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni);
1213 if (!vxh)
1214 goto drop;
1215
1216 flags &= ~VXLAN_HF_RCO;
1217 vni &= VXLAN_VID_MASK;
1218 }
1219
1220 /* For backwards compatibility, only allow reserved fields to be
1221 * used by VXLAN extensions if explicitly requested.
1222 */
1223 if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
1224 struct vxlanhdr_gbp *gbp;
1225
1226 gbp = (struct vxlanhdr_gbp *)vxh;
1227 md.gbp = ntohs(gbp->policy_id);
1228
1229 if (gbp->dont_learn)
1230 md.gbp |= VXLAN_GBP_DONT_LEARN;
1231
1232 if (gbp->policy_applied)
1233 md.gbp |= VXLAN_GBP_POLICY_APPLIED;
1234
1235 flags &= ~VXLAN_GBP_USED_BITS;
1236 }
1237
1238 if (flags || (vni & ~VXLAN_VID_MASK)) {
1239 /* If there are any unprocessed flags remaining treat
1240 * this as a malformed packet. This behavior diverges from
1241 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
1242 * in reserved fields are to be ignored. The approach here
1243 * maintains compatbility with previous stack code, and also
1244 * is more robust and provides a little more security in
1245 * adding extensions to VXLAN.
1246 */
1247
1248 goto bad_flags;
1249 }
1250
1251 md.vni = vxh->vx_vni;
1252 vs->rcv(vs, skb, &md);
1161 return 0; 1253 return 0;
1162 1254
1163drop: 1255drop:
@@ -1165,13 +1257,17 @@ drop:
1165 kfree_skb(skb); 1257 kfree_skb(skb);
1166 return 0; 1258 return 0;
1167 1259
1260bad_flags:
1261 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
1262 ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
1263
1168error: 1264error:
1169 /* Return non vxlan pkt */ 1265 /* Return non vxlan pkt */
1170 return 1; 1266 return 1;
1171} 1267}
1172 1268
1173static void vxlan_rcv(struct vxlan_sock *vs, 1269static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
1174 struct sk_buff *skb, __be32 vx_vni) 1270 struct vxlan_metadata *md)
1175{ 1271{
1176 struct iphdr *oip = NULL; 1272 struct iphdr *oip = NULL;
1177 struct ipv6hdr *oip6 = NULL; 1273 struct ipv6hdr *oip6 = NULL;
@@ -1182,7 +1278,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
1182 int err = 0; 1278 int err = 0;
1183 union vxlan_addr *remote_ip; 1279 union vxlan_addr *remote_ip;
1184 1280
1185 vni = ntohl(vx_vni) >> 8; 1281 vni = ntohl(md->vni) >> 8;
1186 /* Is this VNI defined? */ 1282 /* Is this VNI defined? */
1187 vxlan = vxlan_vs_find_vni(vs, vni); 1283 vxlan = vxlan_vs_find_vni(vs, vni);
1188 if (!vxlan) 1284 if (!vxlan)
@@ -1216,6 +1312,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
1216 goto drop; 1312 goto drop;
1217 1313
1218 skb_reset_network_header(skb); 1314 skb_reset_network_header(skb);
1315 skb->mark = md->gbp;
1219 1316
1220 if (oip6) 1317 if (oip6)
1221 err = IP6_ECN_decapsulate(oip6, skb); 1318 err = IP6_ECN_decapsulate(oip6, skb);
@@ -1565,20 +1662,54 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
1565 return false; 1662 return false;
1566} 1663}
1567 1664
1665static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
1666 struct vxlan_metadata *md)
1667{
1668 struct vxlanhdr_gbp *gbp;
1669
1670 if (!md->gbp)
1671 return;
1672
1673 gbp = (struct vxlanhdr_gbp *)vxh;
1674 vxh->vx_flags |= htonl(VXLAN_HF_GBP);
1675
1676 if (md->gbp & VXLAN_GBP_DONT_LEARN)
1677 gbp->dont_learn = 1;
1678
1679 if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
1680 gbp->policy_applied = 1;
1681
1682 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
1683}
1684
1568#if IS_ENABLED(CONFIG_IPV6) 1685#if IS_ENABLED(CONFIG_IPV6)
1569static int vxlan6_xmit_skb(struct vxlan_sock *vs, 1686static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
1570 struct dst_entry *dst, struct sk_buff *skb,
1571 struct net_device *dev, struct in6_addr *saddr, 1687 struct net_device *dev, struct in6_addr *saddr,
1572 struct in6_addr *daddr, __u8 prio, __u8 ttl, 1688 struct in6_addr *daddr, __u8 prio, __u8 ttl,
1573 __be16 src_port, __be16 dst_port, __be32 vni, 1689 __be16 src_port, __be16 dst_port,
1574 bool xnet) 1690 struct vxlan_metadata *md, bool xnet, u32 vxflags)
1575{ 1691{
1576 struct vxlanhdr *vxh; 1692 struct vxlanhdr *vxh;
1577 int min_headroom; 1693 int min_headroom;
1578 int err; 1694 int err;
1579 bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk); 1695 bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX);
1696 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
1697 u16 hdrlen = sizeof(struct vxlanhdr);
1698
1699 if ((vxflags & VXLAN_F_REMCSUM_TX) &&
1700 skb->ip_summed == CHECKSUM_PARTIAL) {
1701 int csum_start = skb_checksum_start_offset(skb);
1702
1703 if (csum_start <= VXLAN_MAX_REMCSUM_START &&
1704 !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
1705 (skb->csum_offset == offsetof(struct udphdr, check) ||
1706 skb->csum_offset == offsetof(struct tcphdr, check))) {
1707 udp_sum = false;
1708 type |= SKB_GSO_TUNNEL_REMCSUM;
1709 }
1710 }
1580 1711
1581 skb = udp_tunnel_handle_offloads(skb, udp_sum); 1712 skb = iptunnel_handle_offloads(skb, udp_sum, type);
1582 if (IS_ERR(skb)) { 1713 if (IS_ERR(skb)) {
1583 err = -EINVAL; 1714 err = -EINVAL;
1584 goto err; 1715 goto err;
@@ -1588,7 +1719,7 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1588 1719
1589 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len 1720 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
1590 + VXLAN_HLEN + sizeof(struct ipv6hdr) 1721 + VXLAN_HLEN + sizeof(struct ipv6hdr)
1591 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); 1722 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
1592 1723
1593 /* Need space for new headers (invalidates iph ptr) */ 1724 /* Need space for new headers (invalidates iph ptr) */
1594 err = skb_cow_head(skb, min_headroom); 1725 err = skb_cow_head(skb, min_headroom);
@@ -1604,13 +1735,33 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1604 } 1735 }
1605 1736
1606 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); 1737 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1607 vxh->vx_flags = htonl(VXLAN_FLAGS); 1738 vxh->vx_flags = htonl(VXLAN_HF_VNI);
1608 vxh->vx_vni = vni; 1739 vxh->vx_vni = md->vni;
1740
1741 if (type & SKB_GSO_TUNNEL_REMCSUM) {
1742 u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
1743 VXLAN_RCO_SHIFT;
1744
1745 if (skb->csum_offset == offsetof(struct udphdr, check))
1746 data |= VXLAN_RCO_UDP;
1747
1748 vxh->vx_vni |= htonl(data);
1749 vxh->vx_flags |= htonl(VXLAN_HF_RCO);
1750
1751 if (!skb_is_gso(skb)) {
1752 skb->ip_summed = CHECKSUM_NONE;
1753 skb->encapsulation = 0;
1754 }
1755 }
1756
1757 if (vxflags & VXLAN_F_GBP)
1758 vxlan_build_gbp_hdr(vxh, vxflags, md);
1609 1759
1610 skb_set_inner_protocol(skb, htons(ETH_P_TEB)); 1760 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
1611 1761
1612 udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio, 1762 udp_tunnel6_xmit_skb(dst, skb, dev, saddr, daddr, prio,
1613 ttl, src_port, dst_port); 1763 ttl, src_port, dst_port,
1764 !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
1614 return 0; 1765 return 0;
1615err: 1766err:
1616 dst_release(dst); 1767 dst_release(dst);
@@ -1618,23 +1769,38 @@ err:
1618} 1769}
1619#endif 1770#endif
1620 1771
1621int vxlan_xmit_skb(struct vxlan_sock *vs, 1772int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
1622 struct rtable *rt, struct sk_buff *skb,
1623 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, 1773 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
1624 __be16 src_port, __be16 dst_port, __be32 vni, bool xnet) 1774 __be16 src_port, __be16 dst_port,
1775 struct vxlan_metadata *md, bool xnet, u32 vxflags)
1625{ 1776{
1626 struct vxlanhdr *vxh; 1777 struct vxlanhdr *vxh;
1627 int min_headroom; 1778 int min_headroom;
1628 int err; 1779 int err;
1629 bool udp_sum = !vs->sock->sk->sk_no_check_tx; 1780 bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
1781 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
1782 u16 hdrlen = sizeof(struct vxlanhdr);
1783
1784 if ((vxflags & VXLAN_F_REMCSUM_TX) &&
1785 skb->ip_summed == CHECKSUM_PARTIAL) {
1786 int csum_start = skb_checksum_start_offset(skb);
1787
1788 if (csum_start <= VXLAN_MAX_REMCSUM_START &&
1789 !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
1790 (skb->csum_offset == offsetof(struct udphdr, check) ||
1791 skb->csum_offset == offsetof(struct tcphdr, check))) {
1792 udp_sum = false;
1793 type |= SKB_GSO_TUNNEL_REMCSUM;
1794 }
1795 }
1630 1796
1631 skb = udp_tunnel_handle_offloads(skb, udp_sum); 1797 skb = iptunnel_handle_offloads(skb, udp_sum, type);
1632 if (IS_ERR(skb)) 1798 if (IS_ERR(skb))
1633 return PTR_ERR(skb); 1799 return PTR_ERR(skb);
1634 1800
1635 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len 1801 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
1636 + VXLAN_HLEN + sizeof(struct iphdr) 1802 + VXLAN_HLEN + sizeof(struct iphdr)
1637 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); 1803 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
1638 1804
1639 /* Need space for new headers (invalidates iph ptr) */ 1805 /* Need space for new headers (invalidates iph ptr) */
1640 err = skb_cow_head(skb, min_headroom); 1806 err = skb_cow_head(skb, min_headroom);
@@ -1648,13 +1814,33 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
1648 return -ENOMEM; 1814 return -ENOMEM;
1649 1815
1650 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); 1816 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
1651 vxh->vx_flags = htonl(VXLAN_FLAGS); 1817 vxh->vx_flags = htonl(VXLAN_HF_VNI);
1652 vxh->vx_vni = vni; 1818 vxh->vx_vni = md->vni;
1819
1820 if (type & SKB_GSO_TUNNEL_REMCSUM) {
1821 u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
1822 VXLAN_RCO_SHIFT;
1823
1824 if (skb->csum_offset == offsetof(struct udphdr, check))
1825 data |= VXLAN_RCO_UDP;
1826
1827 vxh->vx_vni |= htonl(data);
1828 vxh->vx_flags |= htonl(VXLAN_HF_RCO);
1829
1830 if (!skb_is_gso(skb)) {
1831 skb->ip_summed = CHECKSUM_NONE;
1832 skb->encapsulation = 0;
1833 }
1834 }
1835
1836 if (vxflags & VXLAN_F_GBP)
1837 vxlan_build_gbp_hdr(vxh, vxflags, md);
1653 1838
1654 skb_set_inner_protocol(skb, htons(ETH_P_TEB)); 1839 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
1655 1840
1656 return udp_tunnel_xmit_skb(vs->sock, rt, skb, src, dst, tos, 1841 return udp_tunnel_xmit_skb(rt, skb, src, dst, tos,
1657 ttl, df, src_port, dst_port, xnet); 1842 ttl, df, src_port, dst_port, xnet,
1843 !(vxflags & VXLAN_F_UDP_CSUM));
1658} 1844}
1659EXPORT_SYMBOL_GPL(vxlan_xmit_skb); 1845EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
1660 1846
@@ -1711,6 +1897,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1711 const struct iphdr *old_iph; 1897 const struct iphdr *old_iph;
1712 struct flowi4 fl4; 1898 struct flowi4 fl4;
1713 union vxlan_addr *dst; 1899 union vxlan_addr *dst;
1900 struct vxlan_metadata md;
1714 __be16 src_port = 0, dst_port; 1901 __be16 src_port = 0, dst_port;
1715 u32 vni; 1902 u32 vni;
1716 __be16 df = 0; 1903 __be16 df = 0;
@@ -1772,7 +1959,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1772 1959
1773 ip_rt_put(rt); 1960 ip_rt_put(rt);
1774 dst_vxlan = vxlan_find_vni(vxlan->net, vni, 1961 dst_vxlan = vxlan_find_vni(vxlan->net, vni,
1775 dst->sa.sa_family, dst_port); 1962 dst->sa.sa_family, dst_port,
1963 vxlan->flags);
1776 if (!dst_vxlan) 1964 if (!dst_vxlan)
1777 goto tx_error; 1965 goto tx_error;
1778 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 1966 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1781,12 +1969,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1781 1969
1782 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 1970 tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
1783 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 1971 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
1784 1972 md.vni = htonl(vni << 8);
1785 err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb, 1973 md.gbp = skb->mark;
1786 fl4.saddr, dst->sin.sin_addr.s_addr, 1974
1787 tos, ttl, df, src_port, dst_port, 1975 err = vxlan_xmit_skb(rt, skb, fl4.saddr,
1788 htonl(vni << 8), 1976 dst->sin.sin_addr.s_addr, tos, ttl, df,
1789 !net_eq(vxlan->net, dev_net(vxlan->dev))); 1977 src_port, dst_port, &md,
1978 !net_eq(vxlan->net, dev_net(vxlan->dev)),
1979 vxlan->flags);
1790 if (err < 0) { 1980 if (err < 0) {
1791 /* skb is already freed. */ 1981 /* skb is already freed. */
1792 skb = NULL; 1982 skb = NULL;
@@ -1830,7 +2020,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1830 2020
1831 dst_release(ndst); 2021 dst_release(ndst);
1832 dst_vxlan = vxlan_find_vni(vxlan->net, vni, 2022 dst_vxlan = vxlan_find_vni(vxlan->net, vni,
1833 dst->sa.sa_family, dst_port); 2023 dst->sa.sa_family, dst_port,
2024 vxlan->flags);
1834 if (!dst_vxlan) 2025 if (!dst_vxlan)
1835 goto tx_error; 2026 goto tx_error;
1836 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 2027 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1838,11 +2029,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1838 } 2029 }
1839 2030
1840 ttl = ttl ? : ip6_dst_hoplimit(ndst); 2031 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2032 md.vni = htonl(vni << 8);
2033 md.gbp = skb->mark;
1841 2034
1842 err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb, 2035 err = vxlan6_xmit_skb(ndst, skb, dev, &fl6.saddr, &fl6.daddr,
1843 dev, &fl6.saddr, &fl6.daddr, 0, ttl, 2036 0, ttl, src_port, dst_port, &md,
1844 src_port, dst_port, htonl(vni << 8), 2037 !net_eq(vxlan->net, dev_net(vxlan->dev)),
1845 !net_eq(vxlan->net, dev_net(vxlan->dev))); 2038 vxlan->flags);
1846#endif 2039#endif
1847 } 2040 }
1848 2041
@@ -1998,7 +2191,7 @@ static int vxlan_init(struct net_device *dev)
1998 2191
1999 spin_lock(&vn->sock_lock); 2192 spin_lock(&vn->sock_lock);
2000 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET, 2193 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
2001 vxlan->dst_port); 2194 vxlan->dst_port, vxlan->flags);
2002 if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) { 2195 if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) {
2003 /* If we have a socket with same port already, reuse it */ 2196 /* If we have a socket with same port already, reuse it */
2004 vxlan_vs_add_dev(vs, vxlan); 2197 vxlan_vs_add_dev(vs, vxlan);
@@ -2242,6 +2435,9 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
2242 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 }, 2435 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
2243 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, 2436 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
2244 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, 2437 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
2438 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
2439 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
2440 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
2245}; 2441};
2246 2442
2247static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) 2443static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -2311,15 +2507,11 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
2311 2507
2312 if (ipv6) { 2508 if (ipv6) {
2313 udp_conf.family = AF_INET6; 2509 udp_conf.family = AF_INET6;
2314 udp_conf.use_udp6_tx_checksums =
2315 !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
2316 udp_conf.use_udp6_rx_checksums = 2510 udp_conf.use_udp6_rx_checksums =
2317 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); 2511 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
2318 } else { 2512 } else {
2319 udp_conf.family = AF_INET; 2513 udp_conf.family = AF_INET;
2320 udp_conf.local_ip.s_addr = INADDR_ANY; 2514 udp_conf.local_ip.s_addr = INADDR_ANY;
2321 udp_conf.use_udp_checksums =
2322 !!(flags & VXLAN_F_UDP_CSUM);
2323 } 2515 }
2324 2516
2325 udp_conf.local_udp_port = port; 2517 udp_conf.local_udp_port = port;
@@ -2363,6 +2555,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
2363 atomic_set(&vs->refcnt, 1); 2555 atomic_set(&vs->refcnt, 1);
2364 vs->rcv = rcv; 2556 vs->rcv = rcv;
2365 vs->data = data; 2557 vs->data = data;
2558 vs->flags = (flags & VXLAN_F_RCV_FLAGS);
2366 2559
2367 /* Initialize the vxlan udp offloads structure */ 2560 /* Initialize the vxlan udp offloads structure */
2368 vs->udp_offloads.port = port; 2561 vs->udp_offloads.port = port;
@@ -2401,7 +2594,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
2401 return vs; 2594 return vs;
2402 2595
2403 spin_lock(&vn->sock_lock); 2596 spin_lock(&vn->sock_lock);
2404 vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port); 2597 vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port, flags);
2405 if (vs && ((vs->rcv != rcv) || 2598 if (vs && ((vs->rcv != rcv) ||
2406 !atomic_add_unless(&vs->refcnt, 1, 0))) 2599 !atomic_add_unless(&vs->refcnt, 1, 0)))
2407 vs = ERR_PTR(-EBUSY); 2600 vs = ERR_PTR(-EBUSY);
@@ -2557,8 +2750,19 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2557 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) 2750 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
2558 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; 2751 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
2559 2752
2753 if (data[IFLA_VXLAN_REMCSUM_TX] &&
2754 nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
2755 vxlan->flags |= VXLAN_F_REMCSUM_TX;
2756
2757 if (data[IFLA_VXLAN_REMCSUM_RX] &&
2758 nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
2759 vxlan->flags |= VXLAN_F_REMCSUM_RX;
2760
2761 if (data[IFLA_VXLAN_GBP])
2762 vxlan->flags |= VXLAN_F_GBP;
2763
2560 if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET, 2764 if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
2561 vxlan->dst_port)) { 2765 vxlan->dst_port, vxlan->flags)) {
2562 pr_info("duplicate VNI %u\n", vni); 2766 pr_info("duplicate VNI %u\n", vni);
2563 return -EEXIST; 2767 return -EEXIST;
2564 } 2768 }
@@ -2625,6 +2829,8 @@ static size_t vxlan_get_size(const struct net_device *dev)
2625 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */ 2829 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
2626 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */ 2830 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
2627 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */ 2831 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
2832 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
2833 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
2628 0; 2834 0;
2629} 2835}
2630 2836
@@ -2690,18 +2896,33 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
2690 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX, 2896 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
2691 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) || 2897 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
2692 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX, 2898 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
2693 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX))) 2899 !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
2900 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
2901 !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
2902 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
2903 !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
2694 goto nla_put_failure; 2904 goto nla_put_failure;
2695 2905
2696 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports)) 2906 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
2697 goto nla_put_failure; 2907 goto nla_put_failure;
2698 2908
2909 if (vxlan->flags & VXLAN_F_GBP &&
2910 nla_put_flag(skb, IFLA_VXLAN_GBP))
2911 goto nla_put_failure;
2912
2699 return 0; 2913 return 0;
2700 2914
2701nla_put_failure: 2915nla_put_failure:
2702 return -EMSGSIZE; 2916 return -EMSGSIZE;
2703} 2917}
2704 2918
2919static struct net *vxlan_get_link_net(const struct net_device *dev)
2920{
2921 struct vxlan_dev *vxlan = netdev_priv(dev);
2922
2923 return vxlan->net;
2924}
2925
2705static struct rtnl_link_ops vxlan_link_ops __read_mostly = { 2926static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
2706 .kind = "vxlan", 2927 .kind = "vxlan",
2707 .maxtype = IFLA_VXLAN_MAX, 2928 .maxtype = IFLA_VXLAN_MAX,
@@ -2713,6 +2934,7 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
2713 .dellink = vxlan_dellink, 2934 .dellink = vxlan_dellink,
2714 .get_size = vxlan_get_size, 2935 .get_size = vxlan_get_size,
2715 .fill_info = vxlan_fill_info, 2936 .fill_info = vxlan_fill_info,
2937 .get_link_net = vxlan_get_link_net,
2716}; 2938};
2717 2939
2718static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn, 2940static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 17fcaabb2687..f07a61899545 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1837,6 +1837,7 @@ static int adm8211_probe(struct pci_dev *pdev,
1837 if (!priv->map) { 1837 if (!priv->map) {
1838 printk(KERN_ERR "%s (adm8211): Cannot map device memory\n", 1838 printk(KERN_ERR "%s (adm8211): Cannot map device memory\n",
1839 pci_name(pdev)); 1839 pci_name(pdev));
1840 err = -ENOMEM;
1840 goto err_free_dev; 1841 goto err_free_dev;
1841 } 1842 }
1842 1843
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index ccba4fea7269..1eebe2ea3dfb 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -64,6 +64,7 @@ enum ath_op_flags {
64 ATH_OP_HW_RESET, 64 ATH_OP_HW_RESET,
65 ATH_OP_SCANNING, 65 ATH_OP_SCANNING,
66 ATH_OP_MULTI_CHANNEL, 66 ATH_OP_MULTI_CHANNEL,
67 ATH_OP_WOW_ENABLED,
67}; 68};
68 69
69enum ath_bus_type { 70enum ath_bus_type {
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 8b1b1adb477a..f4dbb3e93bf8 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -8,11 +8,15 @@ ath10k_core-y += mac.o \
8 htt_tx.o \ 8 htt_tx.o \
9 txrx.o \ 9 txrx.o \
10 wmi.o \ 10 wmi.o \
11 bmi.o 11 wmi-tlv.o \
12 bmi.o \
13 hw.o
12 14
13ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o 15ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
14ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o 16ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
15ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o 17ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
18ath10k_core-$(CONFIG_THERMAL) += thermal.o
19ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
16 20
17obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o 21obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
18ath10k_pci-y += pci.o \ 22ath10k_pci-y += pci.o \
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index a156e6e48708..e508c65b6ba8 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -803,7 +803,7 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar)
803 int ce_id; 803 int ce_id;
804 804
805 for (ce_id = 0; ce_id < CE_COUNT; ce_id++) { 805 for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
806 u32 ctrl_addr = ath10k_ce_base_address(ce_id); 806 u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
807 807
808 ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); 808 ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
809 ath10k_ce_error_intr_disable(ar, ctrl_addr); 809 ath10k_ce_error_intr_disable(ar, ctrl_addr);
@@ -832,7 +832,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
832 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 832 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
833 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; 833 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
834 struct ath10k_ce_ring *src_ring = ce_state->src_ring; 834 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
835 u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id); 835 u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
836 836
837 nentries = roundup_pow_of_two(attr->src_nentries); 837 nentries = roundup_pow_of_two(attr->src_nentries);
838 838
@@ -869,7 +869,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
869 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 869 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
870 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; 870 struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
871 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; 871 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
872 u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id); 872 u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
873 873
874 nentries = roundup_pow_of_two(attr->dest_nentries); 874 nentries = roundup_pow_of_two(attr->dest_nentries);
875 875
@@ -1051,7 +1051,7 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
1051 1051
1052static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) 1052static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
1053{ 1053{
1054 u32 ctrl_addr = ath10k_ce_base_address(ce_id); 1054 u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1055 1055
1056 ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0); 1056 ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
1057 ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0); 1057 ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
@@ -1061,7 +1061,7 @@ static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
1061 1061
1062static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id) 1062static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
1063{ 1063{
1064 u32 ctrl_addr = ath10k_ce_base_address(ce_id); 1064 u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1065 1065
1066 ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0); 1066 ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
1067 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0); 1067 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
@@ -1093,10 +1093,12 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
1093 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); 1093 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1094 BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC > 1094 BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
1095 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); 1095 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1096 BUILD_BUG_ON(2*TARGET_TLV_NUM_MSDU_DESC >
1097 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1096 1098
1097 ce_state->ar = ar; 1099 ce_state->ar = ar;
1098 ce_state->id = ce_id; 1100 ce_state->id = ce_id;
1099 ce_state->ctrl_addr = ath10k_ce_base_address(ce_id); 1101 ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1100 ce_state->attr_flags = attr->flags; 1102 ce_state->attr_flags = attr->flags;
1101 ce_state->src_sz_max = attr->src_sz_max; 1103 ce_state->src_sz_max = attr->src_sz_max;
1102 1104
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 617a151e8ce4..c18647b87f71 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -394,7 +394,7 @@ struct ce_attr {
394#define DST_WATERMARK_HIGH_RESET 0 394#define DST_WATERMARK_HIGH_RESET 0
395#define DST_WATERMARK_ADDRESS 0x0050 395#define DST_WATERMARK_ADDRESS 0x0050
396 396
397static inline u32 ath10k_ce_base_address(unsigned int ce_id) 397static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
398{ 398{
399 return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id; 399 return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
400} 400}
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 7762061a1944..310e12bc078a 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -17,6 +17,7 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/firmware.h> 19#include <linux/firmware.h>
20#include <linux/of.h>
20 21
21#include "core.h" 22#include "core.h"
22#include "mac.h" 23#include "mac.h"
@@ -27,20 +28,18 @@
27#include "debug.h" 28#include "debug.h"
28#include "htt.h" 29#include "htt.h"
29#include "testmode.h" 30#include "testmode.h"
31#include "wmi-ops.h"
30 32
31unsigned int ath10k_debug_mask; 33unsigned int ath10k_debug_mask;
32static bool uart_print; 34static bool uart_print;
33static unsigned int ath10k_p2p;
34static bool skip_otp; 35static bool skip_otp;
35 36
36module_param_named(debug_mask, ath10k_debug_mask, uint, 0644); 37module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
37module_param(uart_print, bool, 0644); 38module_param(uart_print, bool, 0644);
38module_param_named(p2p, ath10k_p2p, uint, 0644);
39module_param(skip_otp, bool, 0644); 39module_param(skip_otp, bool, 0644);
40 40
41MODULE_PARM_DESC(debug_mask, "Debugging mask"); 41MODULE_PARM_DESC(debug_mask, "Debugging mask");
42MODULE_PARM_DESC(uart_print, "Uart target debugging"); 42MODULE_PARM_DESC(uart_print, "Uart target debugging");
43MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
44MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode"); 43MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
45 44
46static const struct ath10k_hw_params ath10k_hw_params_list[] = { 45static const struct ath10k_hw_params ath10k_hw_params_list[] = {
@@ -48,11 +47,57 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
48 .id = QCA988X_HW_2_0_VERSION, 47 .id = QCA988X_HW_2_0_VERSION,
49 .name = "qca988x hw2.0", 48 .name = "qca988x hw2.0",
50 .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, 49 .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
50 .uart_pin = 7,
51 .fw = { 51 .fw = {
52 .dir = QCA988X_HW_2_0_FW_DIR, 52 .dir = QCA988X_HW_2_0_FW_DIR,
53 .fw = QCA988X_HW_2_0_FW_FILE, 53 .fw = QCA988X_HW_2_0_FW_FILE,
54 .otp = QCA988X_HW_2_0_OTP_FILE, 54 .otp = QCA988X_HW_2_0_OTP_FILE,
55 .board = QCA988X_HW_2_0_BOARD_DATA_FILE, 55 .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
56 .board_size = QCA988X_BOARD_DATA_SZ,
57 .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
58 },
59 },
60 {
61 .id = QCA6174_HW_2_1_VERSION,
62 .name = "qca6174 hw2.1",
63 .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
64 .uart_pin = 6,
65 .fw = {
66 .dir = QCA6174_HW_2_1_FW_DIR,
67 .fw = QCA6174_HW_2_1_FW_FILE,
68 .otp = QCA6174_HW_2_1_OTP_FILE,
69 .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
70 .board_size = QCA6174_BOARD_DATA_SZ,
71 .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
72 },
73 },
74 {
75 .id = QCA6174_HW_3_0_VERSION,
76 .name = "qca6174 hw3.0",
77 .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
78 .uart_pin = 6,
79 .fw = {
80 .dir = QCA6174_HW_3_0_FW_DIR,
81 .fw = QCA6174_HW_3_0_FW_FILE,
82 .otp = QCA6174_HW_3_0_OTP_FILE,
83 .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
84 .board_size = QCA6174_BOARD_DATA_SZ,
85 .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
86 },
87 },
88 {
89 .id = QCA6174_HW_3_2_VERSION,
90 .name = "qca6174 hw3.2",
91 .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
92 .uart_pin = 6,
93 .fw = {
94 /* uses same binaries as hw3.0 */
95 .dir = QCA6174_HW_3_0_FW_DIR,
96 .fw = QCA6174_HW_3_0_FW_FILE,
97 .otp = QCA6174_HW_3_0_OTP_FILE,
98 .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
99 .board_size = QCA6174_BOARD_DATA_SZ,
100 .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
56 }, 101 },
57 }, 102 },
58}; 103};
@@ -146,8 +191,8 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
146static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data, 191static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
147 size_t data_len) 192 size_t data_len)
148{ 193{
149 u32 board_data_size = QCA988X_BOARD_DATA_SZ; 194 u32 board_data_size = ar->hw_params.fw.board_size;
150 u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ; 195 u32 board_ext_data_size = ar->hw_params.fw.board_ext_size;
151 u32 board_ext_data_addr; 196 u32 board_ext_data_addr;
152 int ret; 197 int ret;
153 198
@@ -193,7 +238,7 @@ static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
193static int ath10k_download_board_data(struct ath10k *ar, const void *data, 238static int ath10k_download_board_data(struct ath10k *ar, const void *data,
194 size_t data_len) 239 size_t data_len)
195{ 240{
196 u32 board_data_size = QCA988X_BOARD_DATA_SZ; 241 u32 board_data_size = ar->hw_params.fw.board_size;
197 u32 address; 242 u32 address;
198 int ret; 243 int ret;
199 244
@@ -249,6 +294,63 @@ static int ath10k_download_cal_file(struct ath10k *ar)
249 return 0; 294 return 0;
250} 295}
251 296
297static int ath10k_download_cal_dt(struct ath10k *ar)
298{
299 struct device_node *node;
300 int data_len;
301 void *data;
302 int ret;
303
304 node = ar->dev->of_node;
305 if (!node)
306 /* Device Tree is optional, don't print any warnings if
307 * there's no node for ath10k.
308 */
309 return -ENOENT;
310
311 if (!of_get_property(node, "qcom,ath10k-calibration-data",
312 &data_len)) {
313 /* The calibration data node is optional */
314 return -ENOENT;
315 }
316
317 if (data_len != QCA988X_CAL_DATA_LEN) {
318 ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
319 data_len);
320 ret = -EMSGSIZE;
321 goto out;
322 }
323
324 data = kmalloc(data_len, GFP_KERNEL);
325 if (!data) {
326 ret = -ENOMEM;
327 goto out;
328 }
329
330 ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data",
331 data, data_len);
332 if (ret) {
333 ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
334 ret);
335 goto out_free;
336 }
337
338 ret = ath10k_download_board_data(ar, data, data_len);
339 if (ret) {
340 ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n",
341 ret);
342 goto out_free;
343 }
344
345 ret = 0;
346
347out_free:
348 kfree(data);
349
350out:
351 return ret;
352}
353
252static int ath10k_download_and_run_otp(struct ath10k *ar) 354static int ath10k_download_and_run_otp(struct ath10k *ar)
253{ 355{
254 u32 result, address = ar->hw_params.patch_load_addr; 356 u32 result, address = ar->hw_params.patch_load_addr;
@@ -447,7 +549,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
447 int ie_id, i, index, bit, ret; 549 int ie_id, i, index, bit, ret;
448 struct ath10k_fw_ie *hdr; 550 struct ath10k_fw_ie *hdr;
449 const u8 *data; 551 const u8 *data;
450 __le32 *timestamp; 552 __le32 *timestamp, *version;
451 553
452 /* first fetch the firmware file (firmware-*.bin) */ 554 /* first fetch the firmware file (firmware-*.bin) */
453 ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name); 555 ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
@@ -562,6 +664,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
562 ar->otp_len = ie_len; 664 ar->otp_len = ie_len;
563 665
564 break; 666 break;
667 case ATH10K_FW_IE_WMI_OP_VERSION:
668 if (ie_len != sizeof(u32))
669 break;
670
671 version = (__le32 *)data;
672
673 ar->wmi.op_version = le32_to_cpup(version);
674
675 ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
676 ar->wmi.op_version);
677 break;
565 default: 678 default:
566 ath10k_warn(ar, "Unknown FW IE: %u\n", 679 ath10k_warn(ar, "Unknown FW IE: %u\n",
567 le32_to_cpu(hdr->id)); 680 le32_to_cpu(hdr->id));
@@ -582,13 +695,6 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
582 goto err; 695 goto err;
583 } 696 }
584 697
585 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
586 !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
587 ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
588 ret = -EINVAL;
589 goto err;
590 }
591
592 /* now fetch the board file */ 698 /* now fetch the board file */
593 if (ar->hw_params.fw.board == NULL) { 699 if (ar->hw_params.fw.board == NULL) {
594 ath10k_err(ar, "board data file not defined"); 700 ath10k_err(ar, "board data file not defined");
@@ -624,6 +730,13 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
624 /* calibration file is optional, don't check for any errors */ 730 /* calibration file is optional, don't check for any errors */
625 ath10k_fetch_cal_file(ar); 731 ath10k_fetch_cal_file(ar);
626 732
733 ar->fw_api = 4;
734 ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
735
736 ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE);
737 if (ret == 0)
738 goto success;
739
627 ar->fw_api = 3; 740 ar->fw_api = 3;
628 ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); 741 ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
629 742
@@ -662,7 +775,17 @@ static int ath10k_download_cal_data(struct ath10k *ar)
662 } 775 }
663 776
664 ath10k_dbg(ar, ATH10K_DBG_BOOT, 777 ath10k_dbg(ar, ATH10K_DBG_BOOT,
665 "boot did not find a calibration file, try OTP next: %d\n", 778 "boot did not find a calibration file, try DT next: %d\n",
779 ret);
780
781 ret = ath10k_download_cal_dt(ar);
782 if (ret == 0) {
783 ar->cal_mode = ATH10K_CAL_MODE_DT;
784 goto done;
785 }
786
787 ath10k_dbg(ar, ATH10K_DBG_BOOT,
788 "boot did not find DT entry, try OTP next: %d\n",
666 ret); 789 ret);
667 790
668 ret = ath10k_download_and_run_otp(ar); 791 ret = ath10k_download_and_run_otp(ar);
@@ -696,7 +819,7 @@ static int ath10k_init_uart(struct ath10k *ar)
696 if (!uart_print) 819 if (!uart_print)
697 return 0; 820 return 0;
698 821
699 ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7); 822 ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, ar->hw_params.uart_pin);
700 if (ret) { 823 if (ret) {
701 ath10k_warn(ar, "could not enable UART prints (%d)\n", ret); 824 ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
702 return ret; 825 return ret;
@@ -764,6 +887,7 @@ static void ath10k_core_restart(struct work_struct *work)
764 complete_all(&ar->offchan_tx_completed); 887 complete_all(&ar->offchan_tx_completed);
765 complete_all(&ar->install_key_done); 888 complete_all(&ar->install_key_done);
766 complete_all(&ar->vdev_setup_done); 889 complete_all(&ar->vdev_setup_done);
890 complete_all(&ar->thermal.wmi_sync);
767 wake_up(&ar->htt.empty_tx_wq); 891 wake_up(&ar->htt.empty_tx_wq);
768 wake_up(&ar->wmi.tx_credits_wq); 892 wake_up(&ar->wmi.tx_credits_wq);
769 wake_up(&ar->peer_mapping_wq); 893 wake_up(&ar->peer_mapping_wq);
@@ -799,15 +923,63 @@ static void ath10k_core_restart(struct work_struct *work)
799 mutex_unlock(&ar->conf_mutex); 923 mutex_unlock(&ar->conf_mutex);
800} 924}
801 925
802static void ath10k_core_init_max_sta_count(struct ath10k *ar) 926static int ath10k_core_init_firmware_features(struct ath10k *ar)
803{ 927{
804 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { 928 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
805 ar->max_num_peers = TARGET_10X_NUM_PEERS; 929 !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
806 ar->max_num_stations = TARGET_10X_NUM_STATIONS; 930 ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
807 } else { 931 return -EINVAL;
932 }
933
934 if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
935 ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n",
936 ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version);
937 return -EINVAL;
938 }
939
940 /* Backwards compatibility for firmwares without
941 * ATH10K_FW_IE_WMI_OP_VERSION.
942 */
943 if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
944 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
945 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2,
946 ar->fw_features))
947 ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
948 else
949 ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
950 } else {
951 ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
952 }
953 }
954
955 switch (ar->wmi.op_version) {
956 case ATH10K_FW_WMI_OP_VERSION_MAIN:
808 ar->max_num_peers = TARGET_NUM_PEERS; 957 ar->max_num_peers = TARGET_NUM_PEERS;
809 ar->max_num_stations = TARGET_NUM_STATIONS; 958 ar->max_num_stations = TARGET_NUM_STATIONS;
959 ar->max_num_vdevs = TARGET_NUM_VDEVS;
960 ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
961 break;
962 case ATH10K_FW_WMI_OP_VERSION_10_1:
963 case ATH10K_FW_WMI_OP_VERSION_10_2:
964 case ATH10K_FW_WMI_OP_VERSION_10_2_4:
965 ar->max_num_peers = TARGET_10X_NUM_PEERS;
966 ar->max_num_stations = TARGET_10X_NUM_STATIONS;
967 ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
968 ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
969 break;
970 case ATH10K_FW_WMI_OP_VERSION_TLV:
971 ar->max_num_peers = TARGET_TLV_NUM_PEERS;
972 ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
973 ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
974 ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
975 break;
976 case ATH10K_FW_WMI_OP_VERSION_UNSET:
977 case ATH10K_FW_WMI_OP_VERSION_MAX:
978 WARN_ON(1);
979 return -EINVAL;
810 } 980 }
981
982 return 0;
811} 983}
812 984
813int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) 985int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
@@ -932,6 +1104,18 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
932 goto err_hif_stop; 1104 goto err_hif_stop;
933 } 1105 }
934 1106
1107 /* If firmware indicates Full Rx Reorder support it must be used in a
1108 * slightly different manner. Let HTT code know.
1109 */
1110 ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
1111 ar->wmi.svc_map));
1112
1113 status = ath10k_htt_rx_ring_refill(ar);
1114 if (status) {
1115 ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
1116 goto err_hif_stop;
1117 }
1118
935 /* we don't care about HTT in UTF mode */ 1119 /* we don't care about HTT in UTF mode */
936 if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { 1120 if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
937 status = ath10k_htt_setup(&ar->htt); 1121 status = ath10k_htt_setup(&ar->htt);
@@ -945,10 +1129,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
945 if (status) 1129 if (status)
946 goto err_hif_stop; 1130 goto err_hif_stop;
947 1131
948 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) 1132 ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
949 ar->free_vdev_map = (1LL << TARGET_10X_NUM_VDEVS) - 1;
950 else
951 ar->free_vdev_map = (1LL << TARGET_NUM_VDEVS) - 1;
952 1133
953 INIT_LIST_HEAD(&ar->arvifs); 1134 INIT_LIST_HEAD(&ar->arvifs);
954 1135
@@ -1025,8 +1206,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
1025 ret = ath10k_bmi_get_target_info(ar, &target_info); 1206 ret = ath10k_bmi_get_target_info(ar, &target_info);
1026 if (ret) { 1207 if (ret) {
1027 ath10k_err(ar, "could not get target info (%d)\n", ret); 1208 ath10k_err(ar, "could not get target info (%d)\n", ret);
1028 ath10k_hif_power_down(ar); 1209 goto err_power_down;
1029 return ret;
1030 } 1210 }
1031 1211
1032 ar->target_version = target_info.version; 1212 ar->target_version = target_info.version;
@@ -1035,28 +1215,28 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
1035 ret = ath10k_init_hw_params(ar); 1215 ret = ath10k_init_hw_params(ar);
1036 if (ret) { 1216 if (ret) {
1037 ath10k_err(ar, "could not get hw params (%d)\n", ret); 1217 ath10k_err(ar, "could not get hw params (%d)\n", ret);
1038 ath10k_hif_power_down(ar); 1218 goto err_power_down;
1039 return ret;
1040 } 1219 }
1041 1220
1042 ret = ath10k_core_fetch_firmware_files(ar); 1221 ret = ath10k_core_fetch_firmware_files(ar);
1043 if (ret) { 1222 if (ret) {
1044 ath10k_err(ar, "could not fetch firmware files (%d)\n", ret); 1223 ath10k_err(ar, "could not fetch firmware files (%d)\n", ret);
1045 ath10k_hif_power_down(ar); 1224 goto err_power_down;
1046 return ret;
1047 } 1225 }
1048 1226
1049 ath10k_core_init_max_sta_count(ar); 1227 ret = ath10k_core_init_firmware_features(ar);
1228 if (ret) {
1229 ath10k_err(ar, "fatal problem with firmware features: %d\n",
1230 ret);
1231 goto err_free_firmware_files;
1232 }
1050 1233
1051 mutex_lock(&ar->conf_mutex); 1234 mutex_lock(&ar->conf_mutex);
1052 1235
1053 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL); 1236 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
1054 if (ret) { 1237 if (ret) {
1055 ath10k_err(ar, "could not init core (%d)\n", ret); 1238 ath10k_err(ar, "could not init core (%d)\n", ret);
1056 ath10k_core_free_firmware_files(ar); 1239 goto err_unlock;
1057 ath10k_hif_power_down(ar);
1058 mutex_unlock(&ar->conf_mutex);
1059 return ret;
1060 } 1240 }
1061 1241
1062 ath10k_print_driver_info(ar); 1242 ath10k_print_driver_info(ar);
@@ -1066,34 +1246,17 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
1066 1246
1067 ath10k_hif_power_down(ar); 1247 ath10k_hif_power_down(ar);
1068 return 0; 1248 return 0;
1069}
1070
1071static int ath10k_core_check_chip_id(struct ath10k *ar)
1072{
1073 u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
1074
1075 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
1076 ar->chip_id, hw_revision);
1077 1249
1078 /* Check that we are not using hw1.0 (some of them have same pci id 1250err_unlock:
1079 * as hw2.0) before doing anything else as ath10k crashes horribly 1251 mutex_unlock(&ar->conf_mutex);
1080 * due to missing hw1.0 workarounds. */
1081 switch (hw_revision) {
1082 case QCA988X_HW_1_0_CHIP_ID_REV:
1083 ath10k_err(ar, "ERROR: qca988x hw1.0 is not supported\n");
1084 return -EOPNOTSUPP;
1085 1252
1086 case QCA988X_HW_2_0_CHIP_ID_REV: 1253err_free_firmware_files:
1087 /* known hardware revision, continue normally */ 1254 ath10k_core_free_firmware_files(ar);
1088 return 0;
1089 1255
1090 default: 1256err_power_down:
1091 ath10k_warn(ar, "Warning: hardware revision unknown (0x%x), expect problems\n", 1257 ath10k_hif_power_down(ar);
1092 ar->chip_id);
1093 return 0;
1094 }
1095 1258
1096 return 0; 1259 return ret;
1097} 1260}
1098 1261
1099static void ath10k_core_register_work(struct work_struct *work) 1262static void ath10k_core_register_work(struct work_struct *work)
@@ -1125,9 +1288,18 @@ static void ath10k_core_register_work(struct work_struct *work)
1125 goto err_debug_destroy; 1288 goto err_debug_destroy;
1126 } 1289 }
1127 1290
1291 status = ath10k_thermal_register(ar);
1292 if (status) {
1293 ath10k_err(ar, "could not register thermal device: %d\n",
1294 status);
1295 goto err_spectral_destroy;
1296 }
1297
1128 set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags); 1298 set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
1129 return; 1299 return;
1130 1300
1301err_spectral_destroy:
1302 ath10k_spectral_destroy(ar);
1131err_debug_destroy: 1303err_debug_destroy:
1132 ath10k_debug_destroy(ar); 1304 ath10k_debug_destroy(ar);
1133err_unregister_mac: 1305err_unregister_mac:
@@ -1143,16 +1315,7 @@ err:
1143 1315
1144int ath10k_core_register(struct ath10k *ar, u32 chip_id) 1316int ath10k_core_register(struct ath10k *ar, u32 chip_id)
1145{ 1317{
1146 int status;
1147
1148 ar->chip_id = chip_id; 1318 ar->chip_id = chip_id;
1149
1150 status = ath10k_core_check_chip_id(ar);
1151 if (status) {
1152 ath10k_err(ar, "Unsupported chip id 0x%08x\n", ar->chip_id);
1153 return status;
1154 }
1155
1156 queue_work(ar->workqueue, &ar->register_work); 1319 queue_work(ar->workqueue, &ar->register_work);
1157 1320
1158 return 0; 1321 return 0;
@@ -1166,6 +1329,7 @@ void ath10k_core_unregister(struct ath10k *ar)
1166 if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) 1329 if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
1167 return; 1330 return;
1168 1331
1332 ath10k_thermal_unregister(ar);
1169 /* Stop spectral before unregistering from mac80211 to remove the 1333 /* Stop spectral before unregistering from mac80211 to remove the
1170 * relayfs debugfs file cleanly. Otherwise the parent debugfs tree 1334 * relayfs debugfs file cleanly. Otherwise the parent debugfs tree
1171 * would be already be free'd recursively, leading to a double free. 1335 * would be already be free'd recursively, leading to a double free.
@@ -1187,6 +1351,7 @@ EXPORT_SYMBOL(ath10k_core_unregister);
1187 1351
1188struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, 1352struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
1189 enum ath10k_bus bus, 1353 enum ath10k_bus bus,
1354 enum ath10k_hw_rev hw_rev,
1190 const struct ath10k_hif_ops *hif_ops) 1355 const struct ath10k_hif_ops *hif_ops)
1191{ 1356{
1192 struct ath10k *ar; 1357 struct ath10k *ar;
@@ -1198,13 +1363,25 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
1198 1363
1199 ar->ath_common.priv = ar; 1364 ar->ath_common.priv = ar;
1200 ar->ath_common.hw = ar->hw; 1365 ar->ath_common.hw = ar->hw;
1201
1202 ar->p2p = !!ath10k_p2p;
1203 ar->dev = dev; 1366 ar->dev = dev;
1204 1367 ar->hw_rev = hw_rev;
1205 ar->hif.ops = hif_ops; 1368 ar->hif.ops = hif_ops;
1206 ar->hif.bus = bus; 1369 ar->hif.bus = bus;
1207 1370
1371 switch (hw_rev) {
1372 case ATH10K_HW_QCA988X:
1373 ar->regs = &qca988x_regs;
1374 break;
1375 case ATH10K_HW_QCA6174:
1376 ar->regs = &qca6174_regs;
1377 break;
1378 default:
1379 ath10k_err(ar, "unsupported core hardware revision %d\n",
1380 hw_rev);
1381 ret = -ENOTSUPP;
1382 goto err_free_mac;
1383 }
1384
1208 init_completion(&ar->scan.started); 1385 init_completion(&ar->scan.started);
1209 init_completion(&ar->scan.completed); 1386 init_completion(&ar->scan.completed);
1210 init_completion(&ar->scan.on_channel); 1387 init_completion(&ar->scan.on_channel);
@@ -1212,6 +1389,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
1212 1389
1213 init_completion(&ar->install_key_done); 1390 init_completion(&ar->install_key_done);
1214 init_completion(&ar->vdev_setup_done); 1391 init_completion(&ar->vdev_setup_done);
1392 init_completion(&ar->thermal.wmi_sync);
1215 1393
1216 INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work); 1394 INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
1217 1395
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 514c219263a5..d60e46fe6d19 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -34,6 +34,7 @@
34#include "../regd.h" 34#include "../regd.h"
35#include "../dfs_pattern_detector.h" 35#include "../dfs_pattern_detector.h"
36#include "spectral.h" 36#include "spectral.h"
37#include "thermal.h"
37 38
38#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) 39#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
39#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) 40#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -96,6 +97,11 @@ struct ath10k_skb_cb {
96 } bcn; 97 } bcn;
97} __packed; 98} __packed;
98 99
100struct ath10k_skb_rxcb {
101 dma_addr_t paddr;
102 struct hlist_node hlist;
103};
104
99static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb) 105static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
100{ 106{
101 BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) > 107 BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
@@ -103,6 +109,15 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
103 return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data; 109 return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
104} 110}
105 111
112static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb)
113{
114 BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb));
115 return (struct ath10k_skb_rxcb *)skb->cb;
116}
117
118#define ATH10K_RXCB_SKB(rxcb) \
119 container_of((void *)rxcb, struct sk_buff, cb)
120
106static inline u32 host_interest_item_address(u32 item_offset) 121static inline u32 host_interest_item_address(u32 item_offset)
107{ 122{
108 return QCA988X_HOST_INTEREST_ADDRESS + item_offset; 123 return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
@@ -120,6 +135,7 @@ struct ath10k_mem_chunk {
120}; 135};
121 136
122struct ath10k_wmi { 137struct ath10k_wmi {
138 enum ath10k_fw_wmi_op_version op_version;
123 enum ath10k_htc_ep_id eid; 139 enum ath10k_htc_ep_id eid;
124 struct completion service_ready; 140 struct completion service_ready;
125 struct completion unified_ready; 141 struct completion unified_ready;
@@ -128,6 +144,7 @@ struct ath10k_wmi {
128 struct wmi_cmd_map *cmd; 144 struct wmi_cmd_map *cmd;
129 struct wmi_vdev_param_map *vdev_param; 145 struct wmi_vdev_param_map *vdev_param;
130 struct wmi_pdev_param_map *pdev_param; 146 struct wmi_pdev_param_map *pdev_param;
147 const struct wmi_ops *ops;
131 148
132 u32 num_mem_chunks; 149 u32 num_mem_chunks;
133 struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS]; 150 struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
@@ -236,10 +253,21 @@ struct ath10k_sta {
236 u32 smps; 253 u32 smps;
237 254
238 struct work_struct update_wk; 255 struct work_struct update_wk;
256
257#ifdef CONFIG_MAC80211_DEBUGFS
258 /* protected by conf_mutex */
259 bool aggr_mode;
260#endif
239}; 261};
240 262
241#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ) 263#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
242 264
265enum ath10k_beacon_state {
266 ATH10K_BEACON_SCHEDULED = 0,
267 ATH10K_BEACON_SENDING,
268 ATH10K_BEACON_SENT,
269};
270
243struct ath10k_vif { 271struct ath10k_vif {
244 struct list_head list; 272 struct list_head list;
245 273
@@ -250,7 +278,7 @@ struct ath10k_vif {
250 u32 dtim_period; 278 u32 dtim_period;
251 struct sk_buff *beacon; 279 struct sk_buff *beacon;
252 /* protected by data_lock */ 280 /* protected by data_lock */
253 bool beacon_sent; 281 enum ath10k_beacon_state beacon_state;
254 void *beacon_buf; 282 void *beacon_buf;
255 dma_addr_t beacon_paddr; 283 dma_addr_t beacon_paddr;
256 284
@@ -263,10 +291,8 @@ struct ath10k_vif {
263 u32 aid; 291 u32 aid;
264 u8 bssid[ETH_ALEN]; 292 u8 bssid[ETH_ALEN];
265 293
266 struct work_struct wep_key_work;
267 struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1]; 294 struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
268 u8 def_wep_key_idx; 295 s8 def_wep_key_idx;
269 u8 def_wep_key_newidx;
270 296
271 u16 tx_seq_no; 297 u16 tx_seq_no;
272 298
@@ -293,6 +319,7 @@ struct ath10k_vif {
293 bool use_cts_prot; 319 bool use_cts_prot;
294 int num_legacy_stations; 320 int num_legacy_stations;
295 int txpower; 321 int txpower;
322 struct wmi_wmm_params_all_arg wmm_params;
296}; 323};
297 324
298struct ath10k_vif_iter { 325struct ath10k_vif_iter {
@@ -323,8 +350,10 @@ struct ath10k_debug {
323 350
324 /* protected by conf_mutex */ 351 /* protected by conf_mutex */
325 u32 fw_dbglog_mask; 352 u32 fw_dbglog_mask;
353 u32 fw_dbglog_level;
326 u32 pktlog_filter; 354 u32 pktlog_filter;
327 u32 reg_addr; 355 u32 reg_addr;
356 u32 nf_cal_period;
328 357
329 u8 htt_max_amsdu; 358 u8 htt_max_amsdu;
330 u8 htt_max_ampdu; 359 u8 htt_max_ampdu;
@@ -369,7 +398,7 @@ enum ath10k_fw_features {
369 /* wmi_mgmt_rx_hdr contains extra RSSI information */ 398 /* wmi_mgmt_rx_hdr contains extra RSSI information */
370 ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0, 399 ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
371 400
372 /* firmware from 10X branch */ 401 /* Firmware from 10X branch. Deprecated, don't use in new code. */
373 ATH10K_FW_FEATURE_WMI_10X = 1, 402 ATH10K_FW_FEATURE_WMI_10X = 1,
374 403
375 /* firmware support tx frame management over WMI, otherwise it's HTT */ 404 /* firmware support tx frame management over WMI, otherwise it's HTT */
@@ -378,8 +407,9 @@ enum ath10k_fw_features {
378 /* Firmware does not support P2P */ 407 /* Firmware does not support P2P */
379 ATH10K_FW_FEATURE_NO_P2P = 3, 408 ATH10K_FW_FEATURE_NO_P2P = 3,
380 409
381 /* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature bit 410 /* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature
382 * is required to be set as well. 411 * bit is required to be set as well. Deprecated, don't use in new
412 * code.
383 */ 413 */
384 ATH10K_FW_FEATURE_WMI_10_2 = 4, 414 ATH10K_FW_FEATURE_WMI_10_2 = 4,
385 415
@@ -401,6 +431,7 @@ enum ath10k_dev_flags {
401enum ath10k_cal_mode { 431enum ath10k_cal_mode {
402 ATH10K_CAL_MODE_FILE, 432 ATH10K_CAL_MODE_FILE,
403 ATH10K_CAL_MODE_OTP, 433 ATH10K_CAL_MODE_OTP,
434 ATH10K_CAL_MODE_DT,
404}; 435};
405 436
406static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode) 437static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
@@ -410,6 +441,8 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
410 return "file"; 441 return "file";
411 case ATH10K_CAL_MODE_OTP: 442 case ATH10K_CAL_MODE_OTP:
412 return "otp"; 443 return "otp";
444 case ATH10K_CAL_MODE_DT:
445 return "dt";
413 } 446 }
414 447
415 return "unknown"; 448 return "unknown";
@@ -444,6 +477,7 @@ struct ath10k {
444 struct device *dev; 477 struct device *dev;
445 u8 mac_addr[ETH_ALEN]; 478 u8 mac_addr[ETH_ALEN];
446 479
480 enum ath10k_hw_rev hw_rev;
447 u32 chip_id; 481 u32 chip_id;
448 u32 target_version; 482 u32 target_version;
449 u8 fw_version_major; 483 u8 fw_version_major;
@@ -459,9 +493,6 @@ struct ath10k {
459 493
460 DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT); 494 DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
461 495
462 struct targetdef *targetdef;
463 struct hostdef *hostdef;
464
465 bool p2p; 496 bool p2p;
466 497
467 struct { 498 struct {
@@ -471,6 +502,7 @@ struct ath10k {
471 502
472 struct completion target_suspend; 503 struct completion target_suspend;
473 504
505 const struct ath10k_hw_regs *regs;
474 struct ath10k_bmi bmi; 506 struct ath10k_bmi bmi;
475 struct ath10k_wmi wmi; 507 struct ath10k_wmi wmi;
476 struct ath10k_htc htc; 508 struct ath10k_htc htc;
@@ -480,12 +512,15 @@ struct ath10k {
480 u32 id; 512 u32 id;
481 const char *name; 513 const char *name;
482 u32 patch_load_addr; 514 u32 patch_load_addr;
515 int uart_pin;
483 516
484 struct ath10k_hw_params_fw { 517 struct ath10k_hw_params_fw {
485 const char *dir; 518 const char *dir;
486 const char *fw; 519 const char *fw;
487 const char *otp; 520 const char *otp;
488 const char *board; 521 const char *board;
522 size_t board_size;
523 size_t board_ext_size;
489 } fw; 524 } fw;
490 } hw_params; 525 } hw_params;
491 526
@@ -548,7 +583,6 @@ struct ath10k {
548 u8 cfg_tx_chainmask; 583 u8 cfg_tx_chainmask;
549 u8 cfg_rx_chainmask; 584 u8 cfg_rx_chainmask;
550 585
551 struct wmi_pdev_set_wmm_params_arg wmm_params;
552 struct completion install_key_done; 586 struct completion install_key_done;
553 587
554 struct completion vdev_setup_done; 588 struct completion vdev_setup_done;
@@ -571,6 +605,7 @@ struct ath10k {
571 605
572 int max_num_peers; 606 int max_num_peers;
573 int max_num_stations; 607 int max_num_stations;
608 int max_num_vdevs;
574 609
575 struct work_struct offchan_tx_work; 610 struct work_struct offchan_tx_work;
576 struct sk_buff_head offchan_tx_queue; 611 struct sk_buff_head offchan_tx_queue;
@@ -610,6 +645,7 @@ struct ath10k {
610 /* protected by conf_mutex */ 645 /* protected by conf_mutex */
611 const struct firmware *utf; 646 const struct firmware *utf;
612 DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT); 647 DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
648 enum ath10k_fw_wmi_op_version orig_wmi_op_version;
613 649
614 /* protected by data_lock */ 650 /* protected by data_lock */
615 bool utf_monitor; 651 bool utf_monitor;
@@ -622,12 +658,15 @@ struct ath10k {
622 u32 fw_cold_reset_counter; 658 u32 fw_cold_reset_counter;
623 } stats; 659 } stats;
624 660
661 struct ath10k_thermal thermal;
662
625 /* must be last */ 663 /* must be last */
626 u8 drv_priv[0] __aligned(sizeof(void *)); 664 u8 drv_priv[0] __aligned(sizeof(void *));
627}; 665};
628 666
629struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, 667struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
630 enum ath10k_bus bus, 668 enum ath10k_bus bus,
669 enum ath10k_hw_rev hw_rev,
631 const struct ath10k_hif_ops *hif_ops); 670 const struct ath10k_hif_ops *hif_ops);
632void ath10k_core_destroy(struct ath10k *ar); 671void ath10k_core_destroy(struct ath10k *ar);
633 672
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index a716758f14b0..d2281e5c2ffe 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -23,6 +23,7 @@
23#include "core.h" 23#include "core.h"
24#include "debug.h" 24#include "debug.h"
25#include "hif.h" 25#include "hif.h"
26#include "wmi-ops.h"
26 27
27/* ms */ 28/* ms */
28#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000 29#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
@@ -123,7 +124,7 @@ EXPORT_SYMBOL(ath10k_info);
123 124
124void ath10k_print_driver_info(struct ath10k *ar) 125void ath10k_print_driver_info(struct ath10k *ar)
125{ 126{
126 ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d.%d.%d.%d cal %s max_sta %d\n", 127 ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
127 ar->hw_params.name, 128 ar->hw_params.name,
128 ar->target_version, 129 ar->target_version,
129 ar->chip_id, 130 ar->chip_id,
@@ -131,10 +132,7 @@ void ath10k_print_driver_info(struct ath10k *ar)
131 ar->fw_api, 132 ar->fw_api,
132 ar->htt.target_version_major, 133 ar->htt.target_version_major,
133 ar->htt.target_version_minor, 134 ar->htt.target_version_minor,
134 ar->fw_version_major, 135 ar->wmi.op_version,
135 ar->fw_version_minor,
136 ar->fw_version_release,
137 ar->fw_version_build,
138 ath10k_cal_mode_str(ar->cal_mode), 136 ath10k_cal_mode_str(ar->cal_mode),
139 ar->max_num_stations); 137 ar->max_num_stations);
140 ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n", 138 ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
@@ -373,7 +371,7 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
373 371
374 ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete, 372 ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
375 1*HZ); 373 1*HZ);
376 if (ret <= 0) 374 if (ret == 0)
377 return -ETIMEDOUT; 375 return -ETIMEDOUT;
378 376
379 spin_lock_bh(&ar->data_lock); 377 spin_lock_bh(&ar->data_lock);
@@ -1320,10 +1318,10 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file,
1320{ 1318{
1321 struct ath10k *ar = file->private_data; 1319 struct ath10k *ar = file->private_data;
1322 unsigned int len; 1320 unsigned int len;
1323 char buf[32]; 1321 char buf[64];
1324 1322
1325 len = scnprintf(buf, sizeof(buf), "0x%08x\n", 1323 len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
1326 ar->debug.fw_dbglog_mask); 1324 ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
1327 1325
1328 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 1326 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1329} 1327}
@@ -1333,19 +1331,32 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
1333 size_t count, loff_t *ppos) 1331 size_t count, loff_t *ppos)
1334{ 1332{
1335 struct ath10k *ar = file->private_data; 1333 struct ath10k *ar = file->private_data;
1336 unsigned long mask;
1337 int ret; 1334 int ret;
1335 char buf[64];
1336 unsigned int log_level, mask;
1338 1337
1339 ret = kstrtoul_from_user(user_buf, count, 0, &mask); 1338 simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
1340 if (ret) 1339
1341 return ret; 1340 /* make sure that buf is null terminated */
1341 buf[sizeof(buf) - 1] = 0;
1342
1343 ret = sscanf(buf, "%x %u", &mask, &log_level);
1344
1345 if (!ret)
1346 return -EINVAL;
1347
1348 if (ret == 1)
1349 /* default if user did not specify */
1350 log_level = ATH10K_DBGLOG_LEVEL_WARN;
1342 1351
1343 mutex_lock(&ar->conf_mutex); 1352 mutex_lock(&ar->conf_mutex);
1344 1353
1345 ar->debug.fw_dbglog_mask = mask; 1354 ar->debug.fw_dbglog_mask = mask;
1355 ar->debug.fw_dbglog_level = log_level;
1346 1356
1347 if (ar->state == ATH10K_STATE_ON) { 1357 if (ar->state == ATH10K_STATE_ON) {
1348 ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask); 1358 ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
1359 ar->debug.fw_dbglog_level);
1349 if (ret) { 1360 if (ret) {
1350 ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n", 1361 ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n",
1351 ret); 1362 ret);
@@ -1607,6 +1618,73 @@ static const struct file_operations fops_cal_data = {
1607 .llseek = default_llseek, 1618 .llseek = default_llseek,
1608}; 1619};
1609 1620
1621static ssize_t ath10k_read_nf_cal_period(struct file *file,
1622 char __user *user_buf,
1623 size_t count, loff_t *ppos)
1624{
1625 struct ath10k *ar = file->private_data;
1626 unsigned int len;
1627 char buf[32];
1628
1629 len = scnprintf(buf, sizeof(buf), "%d\n",
1630 ar->debug.nf_cal_period);
1631
1632 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1633}
1634
1635static ssize_t ath10k_write_nf_cal_period(struct file *file,
1636 const char __user *user_buf,
1637 size_t count, loff_t *ppos)
1638{
1639 struct ath10k *ar = file->private_data;
1640 unsigned long period;
1641 int ret;
1642
1643 ret = kstrtoul_from_user(user_buf, count, 0, &period);
1644 if (ret)
1645 return ret;
1646
1647 if (period > WMI_PDEV_PARAM_CAL_PERIOD_MAX)
1648 return -EINVAL;
1649
1650 /* there's no way to switch back to the firmware default */
1651 if (period == 0)
1652 return -EINVAL;
1653
1654 mutex_lock(&ar->conf_mutex);
1655
1656 ar->debug.nf_cal_period = period;
1657
1658 if (ar->state != ATH10K_STATE_ON) {
1659 /* firmware is not running, nothing else to do */
1660 ret = count;
1661 goto exit;
1662 }
1663
1664 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period,
1665 ar->debug.nf_cal_period);
1666 if (ret) {
1667 ath10k_warn(ar, "cal period cfg failed from debugfs: %d\n",
1668 ret);
1669 goto exit;
1670 }
1671
1672 ret = count;
1673
1674exit:
1675 mutex_unlock(&ar->conf_mutex);
1676
1677 return ret;
1678}
1679
1680static const struct file_operations fops_nf_cal_period = {
1681 .read = ath10k_read_nf_cal_period,
1682 .write = ath10k_write_nf_cal_period,
1683 .open = simple_open,
1684 .owner = THIS_MODULE,
1685 .llseek = default_llseek,
1686};
1687
1610int ath10k_debug_start(struct ath10k *ar) 1688int ath10k_debug_start(struct ath10k *ar)
1611{ 1689{
1612 int ret; 1690 int ret;
@@ -1620,7 +1698,8 @@ int ath10k_debug_start(struct ath10k *ar)
1620 ret); 1698 ret);
1621 1699
1622 if (ar->debug.fw_dbglog_mask) { 1700 if (ar->debug.fw_dbglog_mask) {
1623 ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask); 1701 ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
1702 ATH10K_DBGLOG_LEVEL_WARN);
1624 if (ret) 1703 if (ret)
1625 /* not serious */ 1704 /* not serious */
1626 ath10k_warn(ar, "failed to enable dbglog during start: %d", 1705 ath10k_warn(ar, "failed to enable dbglog during start: %d",
@@ -1642,6 +1721,16 @@ int ath10k_debug_start(struct ath10k *ar)
1642 ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); 1721 ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
1643 } 1722 }
1644 1723
1724 if (ar->debug.nf_cal_period) {
1725 ret = ath10k_wmi_pdev_set_param(ar,
1726 ar->wmi.pdev_param->cal_period,
1727 ar->debug.nf_cal_period);
1728 if (ret)
1729 /* not serious */
1730 ath10k_warn(ar, "cal period cfg failed from debug start: %d\n",
1731 ret);
1732 }
1733
1645 return ret; 1734 return ret;
1646} 1735}
1647 1736
@@ -1880,6 +1969,9 @@ int ath10k_debug_register(struct ath10k *ar)
1880 debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy, 1969 debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
1881 ar, &fops_cal_data); 1970 ar, &fops_cal_data);
1882 1971
1972 debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
1973 ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
1974
1883 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { 1975 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
1884 debugfs_create_file("dfs_simulate_radar", S_IWUSR, 1976 debugfs_create_file("dfs_simulate_radar", S_IWUSR,
1885 ar->debug.debugfs_phy, ar, 1977 ar->debug.debugfs_phy, ar,
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 1b87a5dbec53..a12b8323f9f1 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -48,6 +48,12 @@ enum ath10k_pktlog_filter {
48 ATH10K_PKTLOG_ANY = 0x00000001f, 48 ATH10K_PKTLOG_ANY = 0x00000001f,
49}; 49};
50 50
51enum ath10k_dbg_aggr_mode {
52 ATH10K_DBG_AGGR_MODE_AUTO,
53 ATH10K_DBG_AGGR_MODE_MANUAL,
54 ATH10K_DBG_AGGR_MODE_MAX,
55};
56
51extern unsigned int ath10k_debug_mask; 57extern unsigned int ath10k_debug_mask;
52 58
53__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...); 59__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
@@ -77,7 +83,6 @@ int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
77void ath10k_debug_get_et_stats(struct ieee80211_hw *hw, 83void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
78 struct ieee80211_vif *vif, 84 struct ieee80211_vif *vif,
79 struct ethtool_stats *stats, u64 *data); 85 struct ethtool_stats *stats, u64 *data);
80
81#else 86#else
82static inline int ath10k_debug_start(struct ath10k *ar) 87static inline int ath10k_debug_start(struct ath10k *ar)
83{ 88{
@@ -129,6 +134,10 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
129#define ath10k_debug_get_et_stats NULL 134#define ath10k_debug_get_et_stats NULL
130 135
131#endif /* CONFIG_ATH10K_DEBUGFS */ 136#endif /* CONFIG_ATH10K_DEBUGFS */
137#ifdef CONFIG_MAC80211_DEBUGFS
138void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
139 struct ieee80211_sta *sta, struct dentry *dir);
140#endif /* CONFIG_MAC80211_DEBUGFS */
132 141
133#ifdef CONFIG_ATH10K_DEBUG 142#ifdef CONFIG_ATH10K_DEBUG
134__printf(3, 4) void ath10k_dbg(struct ath10k *ar, 143__printf(3, 4) void ath10k_dbg(struct ath10k *ar,
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
new file mode 100644
index 000000000000..95b5c49374e0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -0,0 +1,243 @@
1/*
2 * Copyright (c) 2014 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "wmi-ops.h"
19#include "debug.h"
20
21static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
22 char __user *user_buf,
23 size_t count, loff_t *ppos)
24{
25 struct ieee80211_sta *sta = file->private_data;
26 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
27 struct ath10k *ar = arsta->arvif->ar;
28 char buf[32];
29 int len = 0;
30
31 mutex_lock(&ar->conf_mutex);
32 len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n",
33 (arsta->aggr_mode == ATH10K_DBG_AGGR_MODE_AUTO) ?
34 "auto" : "manual");
35 mutex_unlock(&ar->conf_mutex);
36
37 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
38}
39
40static ssize_t ath10k_dbg_sta_write_aggr_mode(struct file *file,
41 const char __user *user_buf,
42 size_t count, loff_t *ppos)
43{
44 struct ieee80211_sta *sta = file->private_data;
45 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
46 struct ath10k *ar = arsta->arvif->ar;
47 u32 aggr_mode;
48 int ret;
49
50 if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
51 return -EINVAL;
52
53 if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX)
54 return -EINVAL;
55
56 mutex_lock(&ar->conf_mutex);
57 if ((ar->state != ATH10K_STATE_ON) ||
58 (aggr_mode == arsta->aggr_mode)) {
59 ret = count;
60 goto out;
61 }
62
63 ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
64 if (ret) {
65 ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret);
66 goto out;
67 }
68
69 arsta->aggr_mode = aggr_mode;
70out:
71 mutex_unlock(&ar->conf_mutex);
72 return ret;
73}
74
75static const struct file_operations fops_aggr_mode = {
76 .read = ath10k_dbg_sta_read_aggr_mode,
77 .write = ath10k_dbg_sta_write_aggr_mode,
78 .open = simple_open,
79 .owner = THIS_MODULE,
80 .llseek = default_llseek,
81};
82
83static ssize_t ath10k_dbg_sta_write_addba(struct file *file,
84 const char __user *user_buf,
85 size_t count, loff_t *ppos)
86{
87 struct ieee80211_sta *sta = file->private_data;
88 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
89 struct ath10k *ar = arsta->arvif->ar;
90 u32 tid, buf_size;
91 int ret;
92 char buf[64];
93
94 simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
95
96 /* make sure that buf is null terminated */
97 buf[sizeof(buf) - 1] = '\0';
98
99 ret = sscanf(buf, "%u %u", &tid, &buf_size);
100 if (ret != 2)
101 return -EINVAL;
102
103 /* Valid TID values are 0 through 15 */
104 if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
105 return -EINVAL;
106
107 mutex_lock(&ar->conf_mutex);
108 if ((ar->state != ATH10K_STATE_ON) ||
109 (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
110 ret = count;
111 goto out;
112 }
113
114 ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
115 tid, buf_size);
116 if (ret) {
117 ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
118 arsta->arvif->vdev_id, sta->addr, tid, buf_size);
119 }
120
121 ret = count;
122out:
123 mutex_unlock(&ar->conf_mutex);
124 return ret;
125}
126
127static const struct file_operations fops_addba = {
128 .write = ath10k_dbg_sta_write_addba,
129 .open = simple_open,
130 .owner = THIS_MODULE,
131 .llseek = default_llseek,
132};
133
134static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file,
135 const char __user *user_buf,
136 size_t count, loff_t *ppos)
137{
138 struct ieee80211_sta *sta = file->private_data;
139 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
140 struct ath10k *ar = arsta->arvif->ar;
141 u32 tid, status;
142 int ret;
143 char buf[64];
144
145 simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
146
147 /* make sure that buf is null terminated */
148 buf[sizeof(buf) - 1] = '\0';
149
150 ret = sscanf(buf, "%u %u", &tid, &status);
151 if (ret != 2)
152 return -EINVAL;
153
154 /* Valid TID values are 0 through 15 */
155 if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
156 return -EINVAL;
157
158 mutex_lock(&ar->conf_mutex);
159 if ((ar->state != ATH10K_STATE_ON) ||
160 (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
161 ret = count;
162 goto out;
163 }
164
165 ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
166 tid, status);
167 if (ret) {
168 ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n",
169 arsta->arvif->vdev_id, sta->addr, tid, status);
170 }
171 ret = count;
172out:
173 mutex_unlock(&ar->conf_mutex);
174 return ret;
175}
176
177static const struct file_operations fops_addba_resp = {
178 .write = ath10k_dbg_sta_write_addba_resp,
179 .open = simple_open,
180 .owner = THIS_MODULE,
181 .llseek = default_llseek,
182};
183
184static ssize_t ath10k_dbg_sta_write_delba(struct file *file,
185 const char __user *user_buf,
186 size_t count, loff_t *ppos)
187{
188 struct ieee80211_sta *sta = file->private_data;
189 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
190 struct ath10k *ar = arsta->arvif->ar;
191 u32 tid, initiator, reason;
192 int ret;
193 char buf[64];
194
195 simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
196
197 /* make sure that buf is null terminated */
198 buf[sizeof(buf) - 1] = '\0';
199
200 ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
201 if (ret != 3)
202 return -EINVAL;
203
204 /* Valid TID values are 0 through 15 */
205 if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
206 return -EINVAL;
207
208 mutex_lock(&ar->conf_mutex);
209 if ((ar->state != ATH10K_STATE_ON) ||
210 (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
211 ret = count;
212 goto out;
213 }
214
215 ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
216 tid, initiator, reason);
217 if (ret) {
218 ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
219 arsta->arvif->vdev_id, sta->addr, tid, initiator,
220 reason);
221 }
222 ret = count;
223out:
224 mutex_unlock(&ar->conf_mutex);
225 return ret;
226}
227
228static const struct file_operations fops_delba = {
229 .write = ath10k_dbg_sta_write_delba,
230 .open = simple_open,
231 .owner = THIS_MODULE,
232 .llseek = default_llseek,
233};
234
235void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
236 struct ieee80211_sta *sta, struct dentry *dir)
237{
238 debugfs_create_file("aggr_mode", S_IRUGO | S_IWUSR, dir, sta,
239 &fops_aggr_mode);
240 debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
241 debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
242 debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
243}
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index f1946a6be442..2fd9e180272b 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -703,11 +703,9 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
703 /* wait for response */ 703 /* wait for response */
704 status = wait_for_completion_timeout(&htc->ctl_resp, 704 status = wait_for_completion_timeout(&htc->ctl_resp,
705 ATH10K_HTC_CONN_SVC_TIMEOUT_HZ); 705 ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
706 if (status <= 0) { 706 if (status == 0) {
707 if (status == 0)
708 status = -ETIMEDOUT;
709 ath10k_err(ar, "Service connect timeout: %d\n", status); 707 ath10k_err(ar, "Service connect timeout: %d\n", status);
710 return status; 708 return -ETIMEDOUT;
711 } 709 }
712 710
713 /* we controlled the buffer creation, it's aligned */ 711 /* we controlled the buffer creation, it's aligned */
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 56cb4aceb383..4f59ab923e48 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -53,7 +53,6 @@ int ath10k_htt_init(struct ath10k *ar)
53 struct ath10k_htt *htt = &ar->htt; 53 struct ath10k_htt *htt = &ar->htt;
54 54
55 htt->ar = ar; 55 htt->ar = ar;
56 htt->max_throughput_mbps = 800;
57 56
58 /* 57 /*
59 * Prefetch enough data to satisfy target 58 * Prefetch enough data to satisfy target
@@ -102,7 +101,7 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
102 101
103 status = wait_for_completion_timeout(&htt->target_version_received, 102 status = wait_for_completion_timeout(&htt->target_version_received,
104 HTT_TARGET_VERSION_TIMEOUT_HZ); 103 HTT_TARGET_VERSION_TIMEOUT_HZ);
105 if (status <= 0) { 104 if (status == 0) {
106 ath10k_warn(ar, "htt version request timed out\n"); 105 ath10k_warn(ar, "htt version request timed out\n");
107 return -ETIMEDOUT; 106 return -ETIMEDOUT;
108 } 107 }
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 1bd5545af903..874bf44ff7a2 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -21,6 +21,7 @@
21#include <linux/bug.h> 21#include <linux/bug.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/dmapool.h> 23#include <linux/dmapool.h>
24#include <linux/hashtable.h>
24#include <net/mac80211.h> 25#include <net/mac80211.h>
25 26
26#include "htc.h" 27#include "htc.h"
@@ -286,7 +287,19 @@ enum htt_t2h_msg_type {
286 HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, 287 HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
287 HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, 288 HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
288 HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe, 289 HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
290 HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
291 HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
292 HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
293 HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
294 /* 0x13 reservd */
295 HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
296
297 /* FIXME: Do not depend on this event id. Numbering of this event id is
298 * broken across different firmware revisions and HTT version fails to
299 * indicate this.
300 */
289 HTT_T2H_MSG_TYPE_TEST, 301 HTT_T2H_MSG_TYPE_TEST,
302
290 /* keep this last */ 303 /* keep this last */
291 HTT_T2H_NUM_MSGS 304 HTT_T2H_NUM_MSGS
292}; 305};
@@ -655,6 +668,53 @@ struct htt_rx_fragment_indication {
655#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0 668#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
656#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6 669#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6
657 670
671struct htt_rx_pn_ind {
672 __le16 peer_id;
673 u8 tid;
674 u8 seqno_start;
675 u8 seqno_end;
676 u8 pn_ie_count;
677 u8 reserved;
678 u8 pn_ies[0];
679} __packed;
680
681struct htt_rx_offload_msdu {
682 __le16 msdu_len;
683 __le16 peer_id;
684 u8 vdev_id;
685 u8 tid;
686 u8 fw_desc;
687 u8 payload[0];
688} __packed;
689
690struct htt_rx_offload_ind {
691 u8 reserved;
692 __le16 msdu_count;
693} __packed;
694
695struct htt_rx_in_ord_msdu_desc {
696 __le32 msdu_paddr;
697 __le16 msdu_len;
698 u8 fw_desc;
699 u8 reserved;
700} __packed;
701
702struct htt_rx_in_ord_ind {
703 u8 info;
704 __le16 peer_id;
705 u8 vdev_id;
706 u8 reserved;
707 __le16 msdu_count;
708 struct htt_rx_in_ord_msdu_desc msdu_descs[0];
709} __packed;
710
711#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
712#define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0
713#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
714#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5
715#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040
716#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6
717
658/* 718/*
659 * target -> host test message definition 719 * target -> host test message definition
660 * 720 *
@@ -1150,6 +1210,9 @@ struct htt_resp {
1150 struct htt_rx_test rx_test; 1210 struct htt_rx_test rx_test;
1151 struct htt_pktlog_msg pktlog_msg; 1211 struct htt_pktlog_msg pktlog_msg;
1152 struct htt_stats_conf stats_conf; 1212 struct htt_stats_conf stats_conf;
1213 struct htt_rx_pn_ind rx_pn_ind;
1214 struct htt_rx_offload_ind rx_offload_ind;
1215 struct htt_rx_in_ord_ind rx_in_ord_ind;
1153 }; 1216 };
1154} __packed; 1217} __packed;
1155 1218
@@ -1182,7 +1245,6 @@ struct ath10k_htt {
1182 struct ath10k *ar; 1245 struct ath10k *ar;
1183 enum ath10k_htc_ep_id eid; 1246 enum ath10k_htc_ep_id eid;
1184 1247
1185 int max_throughput_mbps;
1186 u8 target_version_major; 1248 u8 target_version_major;
1187 u8 target_version_minor; 1249 u8 target_version_minor;
1188 struct completion target_version_received; 1250 struct completion target_version_received;
@@ -1198,6 +1260,20 @@ struct ath10k_htt {
1198 * filled. 1260 * filled.
1199 */ 1261 */
1200 struct sk_buff **netbufs_ring; 1262 struct sk_buff **netbufs_ring;
1263
1264 /* This is used only with firmware supporting IN_ORD_IND.
1265 *
1266 * With Full Rx Reorder the HTT Rx Ring is more of a temporary
1267 * buffer ring from which buffer addresses are copied by the
1268 * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
1269 * pointing to specific (re-ordered) buffers.
1270 *
1271 * FIXME: With kernel generic hashing functions there's a lot
1272 * of hash collisions for sk_buffs.
1273 */
1274 bool in_ord_rx;
1275 DECLARE_HASHTABLE(skb_table, 4);
1276
1201 /* 1277 /*
1202 * Ring of buffer addresses - 1278 * Ring of buffer addresses -
1203 * This ring holds the "physical" device address of the 1279 * This ring holds the "physical" device address of the
@@ -1252,12 +1328,11 @@ struct ath10k_htt {
1252 1328
1253 unsigned int prefetch_len; 1329 unsigned int prefetch_len;
1254 1330
1255 /* Protects access to %pending_tx, %used_msdu_ids */ 1331 /* Protects access to pending_tx, num_pending_tx */
1256 spinlock_t tx_lock; 1332 spinlock_t tx_lock;
1257 int max_num_pending_tx; 1333 int max_num_pending_tx;
1258 int num_pending_tx; 1334 int num_pending_tx;
1259 struct sk_buff **pending_tx; 1335 struct idr pending_tx;
1260 unsigned long *used_msdu_ids; /* bitmap */
1261 wait_queue_head_t empty_tx_wq; 1336 wait_queue_head_t empty_tx_wq;
1262 struct dma_pool *tx_pool; 1337 struct dma_pool *tx_pool;
1263 1338
@@ -1271,6 +1346,7 @@ struct ath10k_htt {
1271 struct tasklet_struct txrx_compl_task; 1346 struct tasklet_struct txrx_compl_task;
1272 struct sk_buff_head tx_compl_q; 1347 struct sk_buff_head tx_compl_q;
1273 struct sk_buff_head rx_compl_q; 1348 struct sk_buff_head rx_compl_q;
1349 struct sk_buff_head rx_in_ord_compl_q;
1274 1350
1275 /* rx_status template */ 1351 /* rx_status template */
1276 struct ieee80211_rx_status rx_status; 1352 struct ieee80211_rx_status rx_status;
@@ -1334,6 +1410,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
1334void ath10k_htt_tx_free(struct ath10k_htt *htt); 1410void ath10k_htt_tx_free(struct ath10k_htt *htt);
1335 1411
1336int ath10k_htt_rx_alloc(struct ath10k_htt *htt); 1412int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
1413int ath10k_htt_rx_ring_refill(struct ath10k *ar);
1337void ath10k_htt_rx_free(struct ath10k_htt *htt); 1414void ath10k_htt_rx_free(struct ath10k_htt *htt);
1338 1415
1339void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); 1416void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
@@ -1346,7 +1423,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
1346 u8 max_subfrms_amsdu); 1423 u8 max_subfrms_amsdu);
1347 1424
1348void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); 1425void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
1349int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt); 1426int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
1350void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); 1427void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
1351int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *); 1428int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
1352int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *); 1429int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 9c782a42665e..c1da44f65a4d 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -25,8 +25,8 @@
25 25
26#include <linux/log2.h> 26#include <linux/log2.h>
27 27
28#define HTT_RX_RING_SIZE 1024 28#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
29#define HTT_RX_RING_FILL_LEVEL 1000 29#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
30 30
31/* when under memory pressure rx ring refill may fail and needs a retry */ 31/* when under memory pressure rx ring refill may fail and needs a retry */
32#define HTT_RX_RING_REFILL_RETRY_MS 50 32#define HTT_RX_RING_REFILL_RETRY_MS 50
@@ -34,31 +34,70 @@
34static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); 34static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
35static void ath10k_htt_txrx_compl_task(unsigned long ptr); 35static void ath10k_htt_txrx_compl_task(unsigned long ptr);
36 36
37static struct sk_buff *
38ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
39{
40 struct ath10k_skb_rxcb *rxcb;
41
42 hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
43 if (rxcb->paddr == paddr)
44 return ATH10K_RXCB_SKB(rxcb);
45
46 WARN_ON_ONCE(1);
47 return NULL;
48}
49
37static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) 50static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
38{ 51{
39 struct sk_buff *skb; 52 struct sk_buff *skb;
40 struct ath10k_skb_cb *cb; 53 struct ath10k_skb_rxcb *rxcb;
54 struct hlist_node *n;
41 int i; 55 int i;
42 56
43 for (i = 0; i < htt->rx_ring.fill_cnt; i++) { 57 if (htt->rx_ring.in_ord_rx) {
44 skb = htt->rx_ring.netbufs_ring[i]; 58 hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
45 cb = ATH10K_SKB_CB(skb); 59 skb = ATH10K_RXCB_SKB(rxcb);
46 dma_unmap_single(htt->ar->dev, cb->paddr, 60 dma_unmap_single(htt->ar->dev, rxcb->paddr,
47 skb->len + skb_tailroom(skb), 61 skb->len + skb_tailroom(skb),
48 DMA_FROM_DEVICE); 62 DMA_FROM_DEVICE);
49 dev_kfree_skb_any(skb); 63 hash_del(&rxcb->hlist);
64 dev_kfree_skb_any(skb);
65 }
66 } else {
67 for (i = 0; i < htt->rx_ring.size; i++) {
68 skb = htt->rx_ring.netbufs_ring[i];
69 if (!skb)
70 continue;
71
72 rxcb = ATH10K_SKB_RXCB(skb);
73 dma_unmap_single(htt->ar->dev, rxcb->paddr,
74 skb->len + skb_tailroom(skb),
75 DMA_FROM_DEVICE);
76 dev_kfree_skb_any(skb);
77 }
50 } 78 }
51 79
52 htt->rx_ring.fill_cnt = 0; 80 htt->rx_ring.fill_cnt = 0;
81 hash_init(htt->rx_ring.skb_table);
82 memset(htt->rx_ring.netbufs_ring, 0,
83 htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
53} 84}
54 85
55static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) 86static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
56{ 87{
57 struct htt_rx_desc *rx_desc; 88 struct htt_rx_desc *rx_desc;
89 struct ath10k_skb_rxcb *rxcb;
58 struct sk_buff *skb; 90 struct sk_buff *skb;
59 dma_addr_t paddr; 91 dma_addr_t paddr;
60 int ret = 0, idx; 92 int ret = 0, idx;
61 93
94 /* The Full Rx Reorder firmware has no way of telling the host
95 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
96 * To keep things simple make sure ring is always half empty. This
97 * guarantees there'll be no replenishment overruns possible.
98 */
99 BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
100
62 idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); 101 idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
63 while (num > 0) { 102 while (num > 0) {
64 skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); 103 skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
@@ -86,17 +125,29 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
86 goto fail; 125 goto fail;
87 } 126 }
88 127
89 ATH10K_SKB_CB(skb)->paddr = paddr; 128 rxcb = ATH10K_SKB_RXCB(skb);
129 rxcb->paddr = paddr;
90 htt->rx_ring.netbufs_ring[idx] = skb; 130 htt->rx_ring.netbufs_ring[idx] = skb;
91 htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr); 131 htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
92 htt->rx_ring.fill_cnt++; 132 htt->rx_ring.fill_cnt++;
93 133
134 if (htt->rx_ring.in_ord_rx) {
135 hash_add(htt->rx_ring.skb_table,
136 &ATH10K_SKB_RXCB(skb)->hlist,
137 (u32)paddr);
138 }
139
94 num--; 140 num--;
95 idx++; 141 idx++;
96 idx &= htt->rx_ring.size_mask; 142 idx &= htt->rx_ring.size_mask;
97 } 143 }
98 144
99fail: 145fail:
146 /*
147 * Make sure the rx buffer is updated before available buffer
148 * index to avoid any potential rx ring corruption.
149 */
150 mb();
100 *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx); 151 *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
101 return ret; 152 return ret;
102} 153}
@@ -153,22 +204,20 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
153 ath10k_htt_rx_msdu_buff_replenish(htt); 204 ath10k_htt_rx_msdu_buff_replenish(htt);
154} 205}
155 206
156static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt) 207int ath10k_htt_rx_ring_refill(struct ath10k *ar)
157{ 208{
158 struct sk_buff *skb; 209 struct ath10k_htt *htt = &ar->htt;
159 int i; 210 int ret;
160 211
161 for (i = 0; i < htt->rx_ring.size; i++) { 212 spin_lock_bh(&htt->rx_ring.lock);
162 skb = htt->rx_ring.netbufs_ring[i]; 213 ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
163 if (!skb) 214 htt->rx_ring.fill_cnt));
164 continue; 215 spin_unlock_bh(&htt->rx_ring.lock);
165 216
166 dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr, 217 if (ret)
167 skb->len + skb_tailroom(skb), 218 ath10k_htt_rx_ring_free(htt);
168 DMA_FROM_DEVICE); 219
169 dev_kfree_skb_any(skb); 220 return ret;
170 htt->rx_ring.netbufs_ring[i] = NULL;
171 }
172} 221}
173 222
174void ath10k_htt_rx_free(struct ath10k_htt *htt) 223void ath10k_htt_rx_free(struct ath10k_htt *htt)
@@ -179,8 +228,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
179 228
180 skb_queue_purge(&htt->tx_compl_q); 229 skb_queue_purge(&htt->tx_compl_q);
181 skb_queue_purge(&htt->rx_compl_q); 230 skb_queue_purge(&htt->rx_compl_q);
231 skb_queue_purge(&htt->rx_in_ord_compl_q);
182 232
183 ath10k_htt_rx_ring_clean_up(htt); 233 ath10k_htt_rx_ring_free(htt);
184 234
185 dma_free_coherent(htt->ar->dev, 235 dma_free_coherent(htt->ar->dev,
186 (htt->rx_ring.size * 236 (htt->rx_ring.size *
@@ -212,6 +262,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
212 idx = htt->rx_ring.sw_rd_idx.msdu_payld; 262 idx = htt->rx_ring.sw_rd_idx.msdu_payld;
213 msdu = htt->rx_ring.netbufs_ring[idx]; 263 msdu = htt->rx_ring.netbufs_ring[idx];
214 htt->rx_ring.netbufs_ring[idx] = NULL; 264 htt->rx_ring.netbufs_ring[idx] = NULL;
265 htt->rx_ring.paddrs_ring[idx] = 0;
215 266
216 idx++; 267 idx++;
217 idx &= htt->rx_ring.size_mask; 268 idx &= htt->rx_ring.size_mask;
@@ -219,7 +270,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
219 htt->rx_ring.fill_cnt--; 270 htt->rx_ring.fill_cnt--;
220 271
221 dma_unmap_single(htt->ar->dev, 272 dma_unmap_single(htt->ar->dev,
222 ATH10K_SKB_CB(msdu)->paddr, 273 ATH10K_SKB_RXCB(msdu)->paddr,
223 msdu->len + skb_tailroom(msdu), 274 msdu->len + skb_tailroom(msdu),
224 DMA_FROM_DEVICE); 275 DMA_FROM_DEVICE);
225 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", 276 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
@@ -379,6 +430,82 @@ static void ath10k_htt_rx_replenish_task(unsigned long ptr)
379 ath10k_htt_rx_msdu_buff_replenish(htt); 430 ath10k_htt_rx_msdu_buff_replenish(htt);
380} 431}
381 432
433static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
434 u32 paddr)
435{
436 struct ath10k *ar = htt->ar;
437 struct ath10k_skb_rxcb *rxcb;
438 struct sk_buff *msdu;
439
440 lockdep_assert_held(&htt->rx_ring.lock);
441
442 msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
443 if (!msdu)
444 return NULL;
445
446 rxcb = ATH10K_SKB_RXCB(msdu);
447 hash_del(&rxcb->hlist);
448 htt->rx_ring.fill_cnt--;
449
450 dma_unmap_single(htt->ar->dev, rxcb->paddr,
451 msdu->len + skb_tailroom(msdu),
452 DMA_FROM_DEVICE);
453 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
454 msdu->data, msdu->len + skb_tailroom(msdu));
455
456 return msdu;
457}
458
459static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
460 struct htt_rx_in_ord_ind *ev,
461 struct sk_buff_head *list)
462{
463 struct ath10k *ar = htt->ar;
464 struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
465 struct htt_rx_desc *rxd;
466 struct sk_buff *msdu;
467 int msdu_count;
468 bool is_offload;
469 u32 paddr;
470
471 lockdep_assert_held(&htt->rx_ring.lock);
472
473 msdu_count = __le16_to_cpu(ev->msdu_count);
474 is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
475
476 while (msdu_count--) {
477 paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
478
479 msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
480 if (!msdu) {
481 __skb_queue_purge(list);
482 return -ENOENT;
483 }
484
485 __skb_queue_tail(list, msdu);
486
487 if (!is_offload) {
488 rxd = (void *)msdu->data;
489
490 trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
491
492 skb_put(msdu, sizeof(*rxd));
493 skb_pull(msdu, sizeof(*rxd));
494 skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
495
496 if (!(__le32_to_cpu(rxd->attention.flags) &
497 RX_ATTENTION_FLAGS_MSDU_DONE)) {
498 ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
499 return -EIO;
500 }
501 }
502
503 msdu_desc++;
504 }
505
506 return 0;
507}
508
382int ath10k_htt_rx_alloc(struct ath10k_htt *htt) 509int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
383{ 510{
384 struct ath10k *ar = htt->ar; 511 struct ath10k *ar = htt->ar;
@@ -424,7 +551,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
424 551
425 htt->rx_ring.alloc_idx.vaddr = vaddr; 552 htt->rx_ring.alloc_idx.vaddr = vaddr;
426 htt->rx_ring.alloc_idx.paddr = paddr; 553 htt->rx_ring.alloc_idx.paddr = paddr;
427 htt->rx_ring.sw_rd_idx.msdu_payld = 0; 554 htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
428 *htt->rx_ring.alloc_idx.vaddr = 0; 555 *htt->rx_ring.alloc_idx.vaddr = 0;
429 556
430 /* Initialize the Rx refill retry timer */ 557 /* Initialize the Rx refill retry timer */
@@ -433,14 +560,15 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
433 spin_lock_init(&htt->rx_ring.lock); 560 spin_lock_init(&htt->rx_ring.lock);
434 561
435 htt->rx_ring.fill_cnt = 0; 562 htt->rx_ring.fill_cnt = 0;
436 if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level)) 563 htt->rx_ring.sw_rd_idx.msdu_payld = 0;
437 goto err_fill_ring; 564 hash_init(htt->rx_ring.skb_table);
438 565
439 tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task, 566 tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
440 (unsigned long)htt); 567 (unsigned long)htt);
441 568
442 skb_queue_head_init(&htt->tx_compl_q); 569 skb_queue_head_init(&htt->tx_compl_q);
443 skb_queue_head_init(&htt->rx_compl_q); 570 skb_queue_head_init(&htt->rx_compl_q);
571 skb_queue_head_init(&htt->rx_in_ord_compl_q);
444 572
445 tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task, 573 tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
446 (unsigned long)htt); 574 (unsigned long)htt);
@@ -449,12 +577,6 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
449 htt->rx_ring.size, htt->rx_ring.fill_level); 577 htt->rx_ring.size, htt->rx_ring.fill_level);
450 return 0; 578 return 0;
451 579
452err_fill_ring:
453 ath10k_htt_rx_ring_free(htt);
454 dma_free_coherent(htt->ar->dev,
455 sizeof(*htt->rx_ring.alloc_idx.vaddr),
456 htt->rx_ring.alloc_idx.vaddr,
457 htt->rx_ring.alloc_idx.paddr);
458err_dma_idx: 580err_dma_idx:
459 dma_free_coherent(htt->ar->dev, 581 dma_free_coherent(htt->ar->dev,
460 (htt->rx_ring.size * 582 (htt->rx_ring.size *
@@ -691,7 +813,7 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
691 * 813 *
692 * FIXME: Can we get/compute 64bit TSF? 814 * FIXME: Can we get/compute 64bit TSF?
693 */ 815 */
694 status->mactime = __le32_to_cpu(rxd->ppdu_end.tsf_timestamp); 816 status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
695 status->flag |= RX_FLAG_MACTIME_END; 817 status->flag |= RX_FLAG_MACTIME_END;
696} 818}
697 819
@@ -1578,6 +1700,194 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
1578 spin_unlock_bh(&ar->data_lock); 1700 spin_unlock_bh(&ar->data_lock);
1579} 1701}
1580 1702
1703static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
1704 struct sk_buff_head *amsdu)
1705{
1706 struct sk_buff *msdu;
1707 struct htt_rx_desc *rxd;
1708
1709 if (skb_queue_empty(list))
1710 return -ENOBUFS;
1711
1712 if (WARN_ON(!skb_queue_empty(amsdu)))
1713 return -EINVAL;
1714
1715 while ((msdu = __skb_dequeue(list))) {
1716 __skb_queue_tail(amsdu, msdu);
1717
1718 rxd = (void *)msdu->data - sizeof(*rxd);
1719 if (rxd->msdu_end.info0 &
1720 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
1721 break;
1722 }
1723
1724 msdu = skb_peek_tail(amsdu);
1725 rxd = (void *)msdu->data - sizeof(*rxd);
1726 if (!(rxd->msdu_end.info0 &
1727 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
1728 skb_queue_splice_init(amsdu, list);
1729 return -EAGAIN;
1730 }
1731
1732 return 0;
1733}
1734
1735static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
1736 struct sk_buff *skb)
1737{
1738 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1739
1740 if (!ieee80211_has_protected(hdr->frame_control))
1741 return;
1742
1743 /* Offloaded frames are already decrypted but firmware insists they are
1744 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
1745 * will drop the frame.
1746 */
1747
1748 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1749 status->flag |= RX_FLAG_DECRYPTED |
1750 RX_FLAG_IV_STRIPPED |
1751 RX_FLAG_MMIC_STRIPPED;
1752}
1753
1754static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
1755 struct sk_buff_head *list)
1756{
1757 struct ath10k_htt *htt = &ar->htt;
1758 struct ieee80211_rx_status *status = &htt->rx_status;
1759 struct htt_rx_offload_msdu *rx;
1760 struct sk_buff *msdu;
1761 size_t offset;
1762
1763 while ((msdu = __skb_dequeue(list))) {
1764 /* Offloaded frames don't have Rx descriptor. Instead they have
1765 * a short meta information header.
1766 */
1767
1768 rx = (void *)msdu->data;
1769
1770 skb_put(msdu, sizeof(*rx));
1771 skb_pull(msdu, sizeof(*rx));
1772
1773 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
1774 ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
1775 dev_kfree_skb_any(msdu);
1776 continue;
1777 }
1778
1779 skb_put(msdu, __le16_to_cpu(rx->msdu_len));
1780
1781 /* Offloaded rx header length isn't multiple of 2 nor 4 so the
1782 * actual payload is unaligned. Align the frame. Otherwise
1783 * mac80211 complains. This shouldn't reduce performance much
1784 * because these offloaded frames are rare.
1785 */
1786 offset = 4 - ((unsigned long)msdu->data & 3);
1787 skb_put(msdu, offset);
1788 memmove(msdu->data + offset, msdu->data, msdu->len);
1789 skb_pull(msdu, offset);
1790
1791 /* FIXME: The frame is NWifi. Re-construct QoS Control
1792 * if possible later.
1793 */
1794
1795 memset(status, 0, sizeof(*status));
1796 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1797
1798 ath10k_htt_rx_h_rx_offload_prot(status, msdu);
1799 ath10k_htt_rx_h_channel(ar, status);
1800 ath10k_process_rx(ar, status, msdu);
1801 }
1802}
1803
1804static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
1805{
1806 struct ath10k_htt *htt = &ar->htt;
1807 struct htt_resp *resp = (void *)skb->data;
1808 struct ieee80211_rx_status *status = &htt->rx_status;
1809 struct sk_buff_head list;
1810 struct sk_buff_head amsdu;
1811 u16 peer_id;
1812 u16 msdu_count;
1813 u8 vdev_id;
1814 u8 tid;
1815 bool offload;
1816 bool frag;
1817 int ret;
1818
1819 lockdep_assert_held(&htt->rx_ring.lock);
1820
1821 if (htt->rx_confused)
1822 return;
1823
1824 skb_pull(skb, sizeof(resp->hdr));
1825 skb_pull(skb, sizeof(resp->rx_in_ord_ind));
1826
1827 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
1828 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
1829 vdev_id = resp->rx_in_ord_ind.vdev_id;
1830 tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
1831 offload = !!(resp->rx_in_ord_ind.info &
1832 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
1833 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
1834
1835 ath10k_dbg(ar, ATH10K_DBG_HTT,
1836 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
1837 vdev_id, peer_id, tid, offload, frag, msdu_count);
1838
1839 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
1840 ath10k_warn(ar, "dropping invalid in order rx indication\n");
1841 return;
1842 }
1843
1844 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
1845 * extracted and processed.
1846 */
1847 __skb_queue_head_init(&list);
1848 ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
1849 if (ret < 0) {
1850 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
1851 htt->rx_confused = true;
1852 return;
1853 }
1854
1855 /* Offloaded frames are very different and need to be handled
1856 * separately.
1857 */
1858 if (offload)
1859 ath10k_htt_rx_h_rx_offload(ar, &list);
1860
1861 while (!skb_queue_empty(&list)) {
1862 __skb_queue_head_init(&amsdu);
1863 ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
1864 switch (ret) {
1865 case 0:
1866 /* Note: The in-order indication may report interleaved
1867 * frames from different PPDUs meaning reported rx rate
1868 * to mac80211 isn't accurate/reliable. It's still
1869 * better to report something than nothing though. This
1870 * should still give an idea about rx rate to the user.
1871 */
1872 ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
1873 ath10k_htt_rx_h_filter(ar, &amsdu, status);
1874 ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
1875 ath10k_htt_rx_h_deliver(ar, &amsdu, status);
1876 break;
1877 case -EAGAIN:
1878 /* fall through */
1879 default:
1880 /* Should not happen. */
1881 ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
1882 htt->rx_confused = true;
1883 __skb_queue_purge(&list);
1884 return;
1885 }
1886 }
1887
1888 tasklet_schedule(&htt->rx_replenish_task);
1889}
1890
1581void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 1891void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1582{ 1892{
1583 struct ath10k_htt *htt = &ar->htt; 1893 struct ath10k_htt *htt = &ar->htt;
@@ -1700,6 +2010,20 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1700 */ 2010 */
1701 break; 2011 break;
1702 } 2012 }
2013 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
2014 spin_lock_bh(&htt->rx_ring.lock);
2015 __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
2016 spin_unlock_bh(&htt->rx_ring.lock);
2017 tasklet_schedule(&htt->txrx_compl_task);
2018 return;
2019 }
2020 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
2021 /* FIXME: This WMI-TLV event is overlapping with 10.2
2022 * CHAN_CHANGE - both being 0xF. Neither is being used in
2023 * practice so no immediate action is necessary. Nevertheless
2024 * HTT may need an abstraction layer like WMI has one day.
2025 */
2026 break;
1703 default: 2027 default:
1704 ath10k_warn(ar, "htt event (%d) not handled\n", 2028 ath10k_warn(ar, "htt event (%d) not handled\n",
1705 resp->hdr.msg_type); 2029 resp->hdr.msg_type);
@@ -1715,6 +2039,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
1715static void ath10k_htt_txrx_compl_task(unsigned long ptr) 2039static void ath10k_htt_txrx_compl_task(unsigned long ptr)
1716{ 2040{
1717 struct ath10k_htt *htt = (struct ath10k_htt *)ptr; 2041 struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
2042 struct ath10k *ar = htt->ar;
1718 struct htt_resp *resp; 2043 struct htt_resp *resp;
1719 struct sk_buff *skb; 2044 struct sk_buff *skb;
1720 2045
@@ -1731,5 +2056,10 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
1731 ath10k_htt_rx_handler(htt, &resp->rx_ind); 2056 ath10k_htt_rx_handler(htt, &resp->rx_ind);
1732 dev_kfree_skb_any(skb); 2057 dev_kfree_skb_any(skb);
1733 } 2058 }
2059
2060 while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
2061 ath10k_htt_rx_in_ord_ind(ar, skb);
2062 dev_kfree_skb_any(skb);
2063 }
1734 spin_unlock_bh(&htt->rx_ring.lock); 2064 spin_unlock_bh(&htt->rx_ring.lock);
1735} 2065}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 4bc51d8a14a3..cbd2bc9e6202 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -56,21 +56,18 @@ exit:
56 return ret; 56 return ret;
57} 57}
58 58
59int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt) 59int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
60{ 60{
61 struct ath10k *ar = htt->ar; 61 struct ath10k *ar = htt->ar;
62 int msdu_id; 62 int ret;
63 63
64 lockdep_assert_held(&htt->tx_lock); 64 lockdep_assert_held(&htt->tx_lock);
65 65
66 msdu_id = find_first_zero_bit(htt->used_msdu_ids, 66 ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
67 htt->max_num_pending_tx); 67
68 if (msdu_id == htt->max_num_pending_tx) 68 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
69 return -ENOBUFS;
70 69
71 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id); 70 return ret;
72 __set_bit(msdu_id, htt->used_msdu_ids);
73 return msdu_id;
74} 71}
75 72
76void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) 73void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
@@ -79,79 +76,53 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
79 76
80 lockdep_assert_held(&htt->tx_lock); 77 lockdep_assert_held(&htt->tx_lock);
81 78
82 if (!test_bit(msdu_id, htt->used_msdu_ids))
83 ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
84 msdu_id);
85
86 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id); 79 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
87 __clear_bit(msdu_id, htt->used_msdu_ids); 80
81 idr_remove(&htt->pending_tx, msdu_id);
88} 82}
89 83
90int ath10k_htt_tx_alloc(struct ath10k_htt *htt) 84int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
91{ 85{
92 struct ath10k *ar = htt->ar; 86 struct ath10k *ar = htt->ar;
93 87
94 spin_lock_init(&htt->tx_lock);
95
96 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
97 htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
98 else
99 htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;
100
101 ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", 88 ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
102 htt->max_num_pending_tx); 89 htt->max_num_pending_tx);
103 90
104 htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) * 91 spin_lock_init(&htt->tx_lock);
105 htt->max_num_pending_tx, GFP_KERNEL); 92 idr_init(&htt->pending_tx);
106 if (!htt->pending_tx)
107 return -ENOMEM;
108
109 htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
110 BITS_TO_LONGS(htt->max_num_pending_tx),
111 GFP_KERNEL);
112 if (!htt->used_msdu_ids) {
113 kfree(htt->pending_tx);
114 return -ENOMEM;
115 }
116 93
117 htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev, 94 htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
118 sizeof(struct ath10k_htt_txbuf), 4, 0); 95 sizeof(struct ath10k_htt_txbuf), 4, 0);
119 if (!htt->tx_pool) { 96 if (!htt->tx_pool) {
120 kfree(htt->used_msdu_ids); 97 idr_destroy(&htt->pending_tx);
121 kfree(htt->pending_tx);
122 return -ENOMEM; 98 return -ENOMEM;
123 } 99 }
124 100
125 return 0; 101 return 0;
126} 102}
127 103
128static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt) 104static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
129{ 105{
130 struct ath10k *ar = htt->ar; 106 struct ath10k *ar = ctx;
107 struct ath10k_htt *htt = &ar->htt;
131 struct htt_tx_done tx_done = {0}; 108 struct htt_tx_done tx_done = {0};
132 int msdu_id;
133
134 spin_lock_bh(&htt->tx_lock);
135 for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
136 if (!test_bit(msdu_id, htt->used_msdu_ids))
137 continue;
138 109
139 ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", 110 ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
140 msdu_id);
141 111
142 tx_done.discard = 1; 112 tx_done.discard = 1;
143 tx_done.msdu_id = msdu_id; 113 tx_done.msdu_id = msdu_id;
144 114
145 ath10k_txrx_tx_unref(htt, &tx_done); 115 spin_lock_bh(&htt->tx_lock);
146 } 116 ath10k_txrx_tx_unref(htt, &tx_done);
147 spin_unlock_bh(&htt->tx_lock); 117 spin_unlock_bh(&htt->tx_lock);
118
119 return 0;
148} 120}
149 121
150void ath10k_htt_tx_free(struct ath10k_htt *htt) 122void ath10k_htt_tx_free(struct ath10k_htt *htt)
151{ 123{
152 ath10k_htt_tx_free_pending(htt); 124 idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
153 kfree(htt->pending_tx); 125 idr_destroy(&htt->pending_tx);
154 kfree(htt->used_msdu_ids);
155 dma_pool_destroy(htt->tx_pool); 126 dma_pool_destroy(htt->tx_pool);
156} 127}
157 128
@@ -383,13 +354,12 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
383 len += sizeof(cmd->mgmt_tx); 354 len += sizeof(cmd->mgmt_tx);
384 355
385 spin_lock_bh(&htt->tx_lock); 356 spin_lock_bh(&htt->tx_lock);
386 res = ath10k_htt_tx_alloc_msdu_id(htt); 357 res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
387 if (res < 0) { 358 if (res < 0) {
388 spin_unlock_bh(&htt->tx_lock); 359 spin_unlock_bh(&htt->tx_lock);
389 goto err_tx_dec; 360 goto err_tx_dec;
390 } 361 }
391 msdu_id = res; 362 msdu_id = res;
392 htt->pending_tx[msdu_id] = msdu;
393 spin_unlock_bh(&htt->tx_lock); 363 spin_unlock_bh(&htt->tx_lock);
394 364
395 txdesc = ath10k_htc_alloc_skb(ar, len); 365 txdesc = ath10k_htc_alloc_skb(ar, len);
@@ -428,7 +398,6 @@ err_free_txdesc:
428 dev_kfree_skb_any(txdesc); 398 dev_kfree_skb_any(txdesc);
429err_free_msdu_id: 399err_free_msdu_id:
430 spin_lock_bh(&htt->tx_lock); 400 spin_lock_bh(&htt->tx_lock);
431 htt->pending_tx[msdu_id] = NULL;
432 ath10k_htt_tx_free_msdu_id(htt, msdu_id); 401 ath10k_htt_tx_free_msdu_id(htt, msdu_id);
433 spin_unlock_bh(&htt->tx_lock); 402 spin_unlock_bh(&htt->tx_lock);
434err_tx_dec: 403err_tx_dec:
@@ -460,13 +429,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
460 goto err; 429 goto err;
461 430
462 spin_lock_bh(&htt->tx_lock); 431 spin_lock_bh(&htt->tx_lock);
463 res = ath10k_htt_tx_alloc_msdu_id(htt); 432 res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
464 if (res < 0) { 433 if (res < 0) {
465 spin_unlock_bh(&htt->tx_lock); 434 spin_unlock_bh(&htt->tx_lock);
466 goto err_tx_dec; 435 goto err_tx_dec;
467 } 436 }
468 msdu_id = res; 437 msdu_id = res;
469 htt->pending_tx[msdu_id] = msdu;
470 spin_unlock_bh(&htt->tx_lock); 438 spin_unlock_bh(&htt->tx_lock);
471 439
472 prefetch_len = min(htt->prefetch_len, msdu->len); 440 prefetch_len = min(htt->prefetch_len, msdu->len);
@@ -480,10 +448,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
480 448
481 skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC, 449 skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
482 &paddr); 450 &paddr);
483 if (!skb_cb->htt.txbuf) 451 if (!skb_cb->htt.txbuf) {
452 res = -ENOMEM;
484 goto err_free_msdu_id; 453 goto err_free_msdu_id;
454 }
485 skb_cb->htt.txbuf_paddr = paddr; 455 skb_cb->htt.txbuf_paddr = paddr;
486 456
457 if ((ieee80211_is_action(hdr->frame_control) ||
458 ieee80211_is_deauth(hdr->frame_control) ||
459 ieee80211_is_disassoc(hdr->frame_control)) &&
460 ieee80211_has_protected(hdr->frame_control))
461 skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
462
487 skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, 463 skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
488 DMA_TO_DEVICE); 464 DMA_TO_DEVICE);
489 res = dma_mapping_error(dev, skb_cb->paddr); 465 res = dma_mapping_error(dev, skb_cb->paddr);
@@ -539,8 +515,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
539 515
540 flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); 516 flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
541 flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); 517 flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
542 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; 518 if (msdu->ip_summed == CHECKSUM_PARTIAL) {
543 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; 519 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
520 flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
521 }
544 522
545 /* Prevent firmware from sending up tx inspection requests. There's 523 /* Prevent firmware from sending up tx inspection requests. There's
546 * nothing ath10k can do with frames requested for inspection so force 524 * nothing ath10k can do with frames requested for inspection so force
@@ -598,7 +576,6 @@ err_free_txbuf:
598 skb_cb->htt.txbuf_paddr); 576 skb_cb->htt.txbuf_paddr);
599err_free_msdu_id: 577err_free_msdu_id:
600 spin_lock_bh(&htt->tx_lock); 578 spin_lock_bh(&htt->tx_lock);
601 htt->pending_tx[msdu_id] = NULL;
602 ath10k_htt_tx_free_msdu_id(htt, msdu_id); 579 ath10k_htt_tx_free_msdu_id(htt, msdu_id);
603 spin_unlock_bh(&htt->tx_lock); 580 spin_unlock_bh(&htt->tx_lock);
604err_tx_dec: 581err_tx_dec:
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
new file mode 100644
index 000000000000..839a8791fb9e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/types.h>
18#include "hw.h"
19
20const struct ath10k_hw_regs qca988x_regs = {
21 .rtc_state_cold_reset_mask = 0x00000400,
22 .rtc_soc_base_address = 0x00004000,
23 .rtc_wmac_base_address = 0x00005000,
24 .soc_core_base_address = 0x00009000,
25 .ce_wrapper_base_address = 0x00057000,
26 .ce0_base_address = 0x00057400,
27 .ce1_base_address = 0x00057800,
28 .ce2_base_address = 0x00057c00,
29 .ce3_base_address = 0x00058000,
30 .ce4_base_address = 0x00058400,
31 .ce5_base_address = 0x00058800,
32 .ce6_base_address = 0x00058c00,
33 .ce7_base_address = 0x00059000,
34 .soc_reset_control_si0_rst_mask = 0x00000001,
35 .soc_reset_control_ce_rst_mask = 0x00040000,
36 .soc_chip_id_address = 0x00ec,
37 .scratch_3_address = 0x0030,
38};
39
40const struct ath10k_hw_regs qca6174_regs = {
41 .rtc_state_cold_reset_mask = 0x00002000,
42 .rtc_soc_base_address = 0x00000800,
43 .rtc_wmac_base_address = 0x00001000,
44 .soc_core_base_address = 0x0003a000,
45 .ce_wrapper_base_address = 0x00034000,
46 .ce0_base_address = 0x00034400,
47 .ce1_base_address = 0x00034800,
48 .ce2_base_address = 0x00034c00,
49 .ce3_base_address = 0x00035000,
50 .ce4_base_address = 0x00035400,
51 .ce5_base_address = 0x00035800,
52 .ce6_base_address = 0x00035c00,
53 .ce7_base_address = 0x00036000,
54 .soc_reset_control_si0_rst_mask = 0x00000000,
55 .soc_reset_control_ce_rst_mask = 0x00000001,
56 .soc_chip_id_address = 0x000f0,
57 .scratch_3_address = 0x0028,
58};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index dfedfd0e0f34..460771fcfe9e 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -34,9 +34,50 @@
34#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" 34#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
35#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 35#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
36 36
37/* QCA6174 target BMI version signatures */
38#define QCA6174_HW_1_0_VERSION 0x05000000
39#define QCA6174_HW_1_1_VERSION 0x05000001
40#define QCA6174_HW_1_3_VERSION 0x05000003
41#define QCA6174_HW_2_1_VERSION 0x05010000
42#define QCA6174_HW_3_0_VERSION 0x05020000
43#define QCA6174_HW_3_2_VERSION 0x05030000
44
45enum qca6174_pci_rev {
46 QCA6174_PCI_REV_1_1 = 0x11,
47 QCA6174_PCI_REV_1_3 = 0x13,
48 QCA6174_PCI_REV_2_0 = 0x20,
49 QCA6174_PCI_REV_3_0 = 0x30,
50};
51
52enum qca6174_chip_id_rev {
53 QCA6174_HW_1_0_CHIP_ID_REV = 0,
54 QCA6174_HW_1_1_CHIP_ID_REV = 1,
55 QCA6174_HW_1_3_CHIP_ID_REV = 2,
56 QCA6174_HW_2_1_CHIP_ID_REV = 4,
57 QCA6174_HW_2_2_CHIP_ID_REV = 5,
58 QCA6174_HW_3_0_CHIP_ID_REV = 8,
59 QCA6174_HW_3_1_CHIP_ID_REV = 9,
60 QCA6174_HW_3_2_CHIP_ID_REV = 10,
61};
62
63#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1"
64#define QCA6174_HW_2_1_FW_FILE "firmware.bin"
65#define QCA6174_HW_2_1_OTP_FILE "otp.bin"
66#define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin"
67#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
68
69#define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0"
70#define QCA6174_HW_3_0_FW_FILE "firmware.bin"
71#define QCA6174_HW_3_0_OTP_FILE "otp.bin"
72#define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
73#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
74
37#define ATH10K_FW_API2_FILE "firmware-2.bin" 75#define ATH10K_FW_API2_FILE "firmware-2.bin"
38#define ATH10K_FW_API3_FILE "firmware-3.bin" 76#define ATH10K_FW_API3_FILE "firmware-3.bin"
39 77
78/* added support for ATH10K_FW_IE_WMI_OP_VERSION */
79#define ATH10K_FW_API4_FILE "firmware-4.bin"
80
40#define ATH10K_FW_UTF_FILE "utf.bin" 81#define ATH10K_FW_UTF_FILE "utf.bin"
41 82
42/* includes also the null byte */ 83/* includes also the null byte */
@@ -58,8 +99,57 @@ enum ath10k_fw_ie_type {
58 ATH10K_FW_IE_FEATURES = 2, 99 ATH10K_FW_IE_FEATURES = 2,
59 ATH10K_FW_IE_FW_IMAGE = 3, 100 ATH10K_FW_IE_FW_IMAGE = 3,
60 ATH10K_FW_IE_OTP_IMAGE = 4, 101 ATH10K_FW_IE_OTP_IMAGE = 4,
102
103 /* WMI "operations" interface version, 32 bit value. Supported from
104 * FW API 4 and above.
105 */
106 ATH10K_FW_IE_WMI_OP_VERSION = 5,
107};
108
109enum ath10k_fw_wmi_op_version {
110 ATH10K_FW_WMI_OP_VERSION_UNSET = 0,
111
112 ATH10K_FW_WMI_OP_VERSION_MAIN = 1,
113 ATH10K_FW_WMI_OP_VERSION_10_1 = 2,
114 ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
115 ATH10K_FW_WMI_OP_VERSION_TLV = 4,
116 ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
117
118 /* keep last */
119 ATH10K_FW_WMI_OP_VERSION_MAX,
120};
121
122enum ath10k_hw_rev {
123 ATH10K_HW_QCA988X,
124 ATH10K_HW_QCA6174,
125};
126
127struct ath10k_hw_regs {
128 u32 rtc_state_cold_reset_mask;
129 u32 rtc_soc_base_address;
130 u32 rtc_wmac_base_address;
131 u32 soc_core_base_address;
132 u32 ce_wrapper_base_address;
133 u32 ce0_base_address;
134 u32 ce1_base_address;
135 u32 ce2_base_address;
136 u32 ce3_base_address;
137 u32 ce4_base_address;
138 u32 ce5_base_address;
139 u32 ce6_base_address;
140 u32 ce7_base_address;
141 u32 soc_reset_control_si0_rst_mask;
142 u32 soc_reset_control_ce_rst_mask;
143 u32 soc_chip_id_address;
144 u32 scratch_3_address;
61}; 145};
62 146
147extern const struct ath10k_hw_regs qca988x_regs;
148extern const struct ath10k_hw_regs qca6174_regs;
149
150#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
151#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
152
63/* Known pecularities: 153/* Known pecularities:
64 * - current FW doesn't support raw rx mode (last tested v599) 154 * - current FW doesn't support raw rx mode (last tested v599)
65 * - current FW dumps upon raw tx mode (last tested v599) 155 * - current FW dumps upon raw tx mode (last tested v599)
@@ -162,6 +252,18 @@ struct ath10k_pktlog_hdr {
162#define TARGET_10X_NUM_MSDU_DESC (1024 + 400) 252#define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
163#define TARGET_10X_MAX_FRAG_ENTRIES 0 253#define TARGET_10X_MAX_FRAG_ENTRIES 0
164 254
255/* 10.2 parameters */
256#define TARGET_10_2_DMA_BURST_SIZE 1
257
258/* Target specific defines for WMI-TLV firmware */
259#define TARGET_TLV_NUM_VDEVS 3
260#define TARGET_TLV_NUM_STATIONS 32
261#define TARGET_TLV_NUM_PEERS ((TARGET_TLV_NUM_STATIONS) + \
262 (TARGET_TLV_NUM_VDEVS) + \
263 2)
264#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
265#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
266
165/* Number of Copy Engines supported */ 267/* Number of Copy Engines supported */
166#define CE_COUNT 8 268#define CE_COUNT 8
167 269
@@ -192,7 +294,7 @@ struct ath10k_pktlog_hdr {
192/* as of IP3.7.1 */ 294/* as of IP3.7.1 */
193#define RTC_STATE_V_ON 3 295#define RTC_STATE_V_ON 3
194 296
195#define RTC_STATE_COLD_RESET_MASK 0x00000400 297#define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask
196#define RTC_STATE_V_LSB 0 298#define RTC_STATE_V_LSB 0
197#define RTC_STATE_V_MASK 0x00000007 299#define RTC_STATE_V_MASK 0x00000007
198#define RTC_STATE_ADDRESS 0x0000 300#define RTC_STATE_ADDRESS 0x0000
@@ -201,12 +303,12 @@ struct ath10k_pktlog_hdr {
201#define PCIE_SOC_WAKE_RESET 0x00000000 303#define PCIE_SOC_WAKE_RESET 0x00000000
202#define SOC_GLOBAL_RESET_ADDRESS 0x0008 304#define SOC_GLOBAL_RESET_ADDRESS 0x0008
203 305
204#define RTC_SOC_BASE_ADDRESS 0x00004000 306#define RTC_SOC_BASE_ADDRESS ar->regs->rtc_soc_base_address
205#define RTC_WMAC_BASE_ADDRESS 0x00005000 307#define RTC_WMAC_BASE_ADDRESS ar->regs->rtc_wmac_base_address
206#define MAC_COEX_BASE_ADDRESS 0x00006000 308#define MAC_COEX_BASE_ADDRESS 0x00006000
207#define BT_COEX_BASE_ADDRESS 0x00007000 309#define BT_COEX_BASE_ADDRESS 0x00007000
208#define SOC_PCIE_BASE_ADDRESS 0x00008000 310#define SOC_PCIE_BASE_ADDRESS 0x00008000
209#define SOC_CORE_BASE_ADDRESS 0x00009000 311#define SOC_CORE_BASE_ADDRESS ar->regs->soc_core_base_address
210#define WLAN_UART_BASE_ADDRESS 0x0000c000 312#define WLAN_UART_BASE_ADDRESS 0x0000c000
211#define WLAN_SI_BASE_ADDRESS 0x00010000 313#define WLAN_SI_BASE_ADDRESS 0x00010000
212#define WLAN_GPIO_BASE_ADDRESS 0x00014000 314#define WLAN_GPIO_BASE_ADDRESS 0x00014000
@@ -215,23 +317,23 @@ struct ath10k_pktlog_hdr {
215#define EFUSE_BASE_ADDRESS 0x00030000 317#define EFUSE_BASE_ADDRESS 0x00030000
216#define FPGA_REG_BASE_ADDRESS 0x00039000 318#define FPGA_REG_BASE_ADDRESS 0x00039000
217#define WLAN_UART2_BASE_ADDRESS 0x00054c00 319#define WLAN_UART2_BASE_ADDRESS 0x00054c00
218#define CE_WRAPPER_BASE_ADDRESS 0x00057000 320#define CE_WRAPPER_BASE_ADDRESS ar->regs->ce_wrapper_base_address
219#define CE0_BASE_ADDRESS 0x00057400 321#define CE0_BASE_ADDRESS ar->regs->ce0_base_address
220#define CE1_BASE_ADDRESS 0x00057800 322#define CE1_BASE_ADDRESS ar->regs->ce1_base_address
221#define CE2_BASE_ADDRESS 0x00057c00 323#define CE2_BASE_ADDRESS ar->regs->ce2_base_address
222#define CE3_BASE_ADDRESS 0x00058000 324#define CE3_BASE_ADDRESS ar->regs->ce3_base_address
223#define CE4_BASE_ADDRESS 0x00058400 325#define CE4_BASE_ADDRESS ar->regs->ce4_base_address
224#define CE5_BASE_ADDRESS 0x00058800 326#define CE5_BASE_ADDRESS ar->regs->ce5_base_address
225#define CE6_BASE_ADDRESS 0x00058c00 327#define CE6_BASE_ADDRESS ar->regs->ce6_base_address
226#define CE7_BASE_ADDRESS 0x00059000 328#define CE7_BASE_ADDRESS ar->regs->ce7_base_address
227#define DBI_BASE_ADDRESS 0x00060000 329#define DBI_BASE_ADDRESS 0x00060000
228#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000 330#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
229#define PCIE_LOCAL_BASE_ADDRESS 0x00080000 331#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
230 332
231#define SOC_RESET_CONTROL_ADDRESS 0x00000000 333#define SOC_RESET_CONTROL_ADDRESS 0x00000000
232#define SOC_RESET_CONTROL_OFFSET 0x00000000 334#define SOC_RESET_CONTROL_OFFSET 0x00000000
233#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001 335#define SOC_RESET_CONTROL_SI0_RST_MASK ar->regs->soc_reset_control_si0_rst_mask
234#define SOC_RESET_CONTROL_CE_RST_MASK 0x00040000 336#define SOC_RESET_CONTROL_CE_RST_MASK ar->regs->soc_reset_control_ce_rst_mask
235#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040 337#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
236#define SOC_CPU_CLOCK_OFFSET 0x00000020 338#define SOC_CPU_CLOCK_OFFSET 0x00000020
237#define SOC_CPU_CLOCK_STANDARD_LSB 0 339#define SOC_CPU_CLOCK_STANDARD_LSB 0
@@ -245,7 +347,7 @@ struct ath10k_pktlog_hdr {
245#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050 347#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
246#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004 348#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
247 349
248#define SOC_CHIP_ID_ADDRESS 0x000000ec 350#define SOC_CHIP_ID_ADDRESS ar->regs->soc_chip_id_address
249#define SOC_CHIP_ID_REV_LSB 8 351#define SOC_CHIP_ID_REV_LSB 8
250#define SOC_CHIP_ID_REV_MASK 0x00000f00 352#define SOC_CHIP_ID_REV_MASK 0x00000f00
251 353
@@ -301,7 +403,7 @@ struct ath10k_pktlog_hdr {
301#define PCIE_INTR_ENABLE_ADDRESS 0x0008 403#define PCIE_INTR_ENABLE_ADDRESS 0x0008
302#define PCIE_INTR_CAUSE_ADDRESS 0x000c 404#define PCIE_INTR_CAUSE_ADDRESS 0x000c
303#define PCIE_INTR_CLR_ADDRESS 0x0014 405#define PCIE_INTR_CLR_ADDRESS 0x0014
304#define SCRATCH_3_ADDRESS 0x0030 406#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
305#define CPU_INTR_ADDRESS 0x0010 407#define CPU_INTR_ADDRESS 0x0010
306 408
307/* Firmware indications to the Host via SCRATCH_3 register. */ 409/* Firmware indications to the Host via SCRATCH_3 register. */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index c4005670cba2..d6d2f0f00caa 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -27,6 +27,8 @@
27#include "htt.h" 27#include "htt.h"
28#include "txrx.h" 28#include "txrx.h"
29#include "testmode.h" 29#include "testmode.h"
30#include "wmi.h"
31#include "wmi-ops.h"
30 32
31/**********/ 33/**********/
32/* Crypto */ 34/* Crypto */
@@ -35,7 +37,7 @@
35static int ath10k_send_key(struct ath10k_vif *arvif, 37static int ath10k_send_key(struct ath10k_vif *arvif,
36 struct ieee80211_key_conf *key, 38 struct ieee80211_key_conf *key,
37 enum set_key_cmd cmd, 39 enum set_key_cmd cmd,
38 const u8 *macaddr) 40 const u8 *macaddr, bool def_idx)
39{ 41{
40 struct ath10k *ar = arvif->ar; 42 struct ath10k *ar = arvif->ar;
41 struct wmi_vdev_install_key_arg arg = { 43 struct wmi_vdev_install_key_arg arg = {
@@ -56,10 +58,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
56 switch (key->cipher) { 58 switch (key->cipher) {
57 case WLAN_CIPHER_SUITE_CCMP: 59 case WLAN_CIPHER_SUITE_CCMP:
58 arg.key_cipher = WMI_CIPHER_AES_CCM; 60 arg.key_cipher = WMI_CIPHER_AES_CCM;
59 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) 61 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
60 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
61 else
62 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
63 break; 62 break;
64 case WLAN_CIPHER_SUITE_TKIP: 63 case WLAN_CIPHER_SUITE_TKIP:
65 arg.key_cipher = WMI_CIPHER_TKIP; 64 arg.key_cipher = WMI_CIPHER_TKIP;
@@ -73,7 +72,13 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
73 * Otherwise pairwise key must be set */ 72 * Otherwise pairwise key must be set */
74 if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN)) 73 if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
75 arg.key_flags = WMI_KEY_PAIRWISE; 74 arg.key_flags = WMI_KEY_PAIRWISE;
75
76 if (def_idx)
77 arg.key_flags |= WMI_KEY_TX_USAGE;
76 break; 78 break;
79 case WLAN_CIPHER_SUITE_AES_CMAC:
80 /* this one needs to be done in software */
81 return 1;
77 default: 82 default:
78 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); 83 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
79 return -EOPNOTSUPP; 84 return -EOPNOTSUPP;
@@ -90,7 +95,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
90static int ath10k_install_key(struct ath10k_vif *arvif, 95static int ath10k_install_key(struct ath10k_vif *arvif,
91 struct ieee80211_key_conf *key, 96 struct ieee80211_key_conf *key,
92 enum set_key_cmd cmd, 97 enum set_key_cmd cmd,
93 const u8 *macaddr) 98 const u8 *macaddr, bool def_idx)
94{ 99{
95 struct ath10k *ar = arvif->ar; 100 struct ath10k *ar = arvif->ar;
96 int ret; 101 int ret;
@@ -99,7 +104,7 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
99 104
100 reinit_completion(&ar->install_key_done); 105 reinit_completion(&ar->install_key_done);
101 106
102 ret = ath10k_send_key(arvif, key, cmd, macaddr); 107 ret = ath10k_send_key(arvif, key, cmd, macaddr, def_idx);
103 if (ret) 108 if (ret)
104 return ret; 109 return ret;
105 110
@@ -117,6 +122,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
117 struct ath10k_peer *peer; 122 struct ath10k_peer *peer;
118 int ret; 123 int ret;
119 int i; 124 int i;
125 bool def_idx;
120 126
121 lockdep_assert_held(&ar->conf_mutex); 127 lockdep_assert_held(&ar->conf_mutex);
122 128
@@ -130,9 +136,14 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
130 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) { 136 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
131 if (arvif->wep_keys[i] == NULL) 137 if (arvif->wep_keys[i] == NULL)
132 continue; 138 continue;
139 /* set TX_USAGE flag for default key id */
140 if (arvif->def_wep_key_idx == i)
141 def_idx = true;
142 else
143 def_idx = false;
133 144
134 ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY, 145 ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
135 addr); 146 addr, def_idx);
136 if (ret) 147 if (ret)
137 return ret; 148 return ret;
138 149
@@ -166,8 +177,9 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
166 if (peer->keys[i] == NULL) 177 if (peer->keys[i] == NULL)
167 continue; 178 continue;
168 179
180 /* key flags are not required to delete the key */
169 ret = ath10k_install_key(arvif, peer->keys[i], 181 ret = ath10k_install_key(arvif, peer->keys[i],
170 DISABLE_KEY, addr); 182 DISABLE_KEY, addr, false);
171 if (ret && first_errno == 0) 183 if (ret && first_errno == 0)
172 first_errno = ret; 184 first_errno = ret;
173 185
@@ -241,8 +253,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
241 253
242 if (i == ARRAY_SIZE(peer->keys)) 254 if (i == ARRAY_SIZE(peer->keys))
243 break; 255 break;
244 256 /* key flags are not required to delete the key */
245 ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr); 257 ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, false);
246 if (ret && first_errno == 0) 258 if (ret && first_errno == 0)
247 first_errno = ret; 259 first_errno = ret;
248 260
@@ -267,7 +279,10 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
267 case IEEE80211_BAND_2GHZ: 279 case IEEE80211_BAND_2GHZ:
268 switch (chandef->width) { 280 switch (chandef->width) {
269 case NL80211_CHAN_WIDTH_20_NOHT: 281 case NL80211_CHAN_WIDTH_20_NOHT:
270 phymode = MODE_11G; 282 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
283 phymode = MODE_11B;
284 else
285 phymode = MODE_11G;
271 break; 286 break;
272 case NL80211_CHAN_WIDTH_20: 287 case NL80211_CHAN_WIDTH_20:
273 phymode = MODE_11NG_HT20; 288 phymode = MODE_11NG_HT20;
@@ -519,10 +534,14 @@ void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
519 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, 534 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
520 arvif->beacon->len, DMA_TO_DEVICE); 535 arvif->beacon->len, DMA_TO_DEVICE);
521 536
537 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
538 arvif->beacon_state != ATH10K_BEACON_SENT))
539 return;
540
522 dev_kfree_skb_any(arvif->beacon); 541 dev_kfree_skb_any(arvif->beacon);
523 542
524 arvif->beacon = NULL; 543 arvif->beacon = NULL;
525 arvif->beacon_sent = false; 544 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
526} 545}
527 546
528static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) 547static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
@@ -962,6 +981,143 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
962 return ret; 981 return ret;
963} 982}
964 983
984static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
985 struct sk_buff *bcn)
986{
987 struct ath10k *ar = arvif->ar;
988 struct ieee80211_mgmt *mgmt;
989 const u8 *p2p_ie;
990 int ret;
991
992 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
993 return 0;
994
995 if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
996 return 0;
997
998 mgmt = (void *)bcn->data;
999 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1000 mgmt->u.beacon.variable,
1001 bcn->len - (mgmt->u.beacon.variable -
1002 bcn->data));
1003 if (!p2p_ie)
1004 return -ENOENT;
1005
1006 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
1007 if (ret) {
1008 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
1009 arvif->vdev_id, ret);
1010 return ret;
1011 }
1012
1013 return 0;
1014}
1015
1016static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
1017 u8 oui_type, size_t ie_offset)
1018{
1019 size_t len;
1020 const u8 *next;
1021 const u8 *end;
1022 u8 *ie;
1023
1024 if (WARN_ON(skb->len < ie_offset))
1025 return -EINVAL;
1026
1027 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
1028 skb->data + ie_offset,
1029 skb->len - ie_offset);
1030 if (!ie)
1031 return -ENOENT;
1032
1033 len = ie[1] + 2;
1034 end = skb->data + skb->len;
1035 next = ie + len;
1036
1037 if (WARN_ON(next > end))
1038 return -EINVAL;
1039
1040 memmove(ie, next, end - next);
1041 skb_trim(skb, skb->len - len);
1042
1043 return 0;
1044}
1045
1046static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
1047{
1048 struct ath10k *ar = arvif->ar;
1049 struct ieee80211_hw *hw = ar->hw;
1050 struct ieee80211_vif *vif = arvif->vif;
1051 struct ieee80211_mutable_offsets offs = {};
1052 struct sk_buff *bcn;
1053 int ret;
1054
1055 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1056 return 0;
1057
1058 bcn = ieee80211_beacon_get_template(hw, vif, &offs);
1059 if (!bcn) {
1060 ath10k_warn(ar, "failed to get beacon template from mac80211\n");
1061 return -EPERM;
1062 }
1063
1064 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
1065 if (ret) {
1066 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
1067 kfree_skb(bcn);
1068 return ret;
1069 }
1070
1071 /* P2P IE is inserted by firmware automatically (as configured above)
1072 * so remove it from the base beacon template to avoid duplicate P2P
1073 * IEs in beacon frames.
1074 */
1075 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1076 offsetof(struct ieee80211_mgmt,
1077 u.beacon.variable));
1078
1079 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
1080 0, NULL, 0);
1081 kfree_skb(bcn);
1082
1083 if (ret) {
1084 ath10k_warn(ar, "failed to submit beacon template command: %d\n",
1085 ret);
1086 return ret;
1087 }
1088
1089 return 0;
1090}
1091
1092static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
1093{
1094 struct ath10k *ar = arvif->ar;
1095 struct ieee80211_hw *hw = ar->hw;
1096 struct ieee80211_vif *vif = arvif->vif;
1097 struct sk_buff *prb;
1098 int ret;
1099
1100 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1101 return 0;
1102
1103 prb = ieee80211_proberesp_get(hw, vif);
1104 if (!prb) {
1105 ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
1106 return -EPERM;
1107 }
1108
1109 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
1110 kfree_skb(prb);
1111
1112 if (ret) {
1113 ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
1114 ret);
1115 return ret;
1116 }
1117
1118 return 0;
1119}
1120
965static void ath10k_control_beaconing(struct ath10k_vif *arvif, 1121static void ath10k_control_beaconing(struct ath10k_vif *arvif,
966 struct ieee80211_bss_conf *info) 1122 struct ieee80211_bss_conf *info)
967{ 1123{
@@ -1046,28 +1202,85 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
1046 arvif->vdev_id, ret); 1202 arvif->vdev_id, ret);
1047} 1203}
1048 1204
1049/* 1205static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
1050 * Review this when mac80211 gains per-interface powersave support. 1206{
1051 */ 1207 struct ath10k *ar = arvif->ar;
1208 u32 param;
1209 u32 value;
1210 int ret;
1211
1212 lockdep_assert_held(&arvif->ar->conf_mutex);
1213
1214 if (arvif->u.sta.uapsd)
1215 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
1216 else
1217 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1218
1219 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1220 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
1221 if (ret) {
1222 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
1223 value, arvif->vdev_id, ret);
1224 return ret;
1225 }
1226
1227 return 0;
1228}
1229
1230static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
1231{
1232 struct ath10k *ar = arvif->ar;
1233 u32 param;
1234 u32 value;
1235 int ret;
1236
1237 lockdep_assert_held(&arvif->ar->conf_mutex);
1238
1239 if (arvif->u.sta.uapsd)
1240 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
1241 else
1242 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1243
1244 param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1245 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1246 param, value);
1247 if (ret) {
1248 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
1249 value, arvif->vdev_id, ret);
1250 return ret;
1251 }
1252
1253 return 0;
1254}
1255
1052static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) 1256static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
1053{ 1257{
1054 struct ath10k *ar = arvif->ar; 1258 struct ath10k *ar = arvif->ar;
1259 struct ieee80211_vif *vif = arvif->vif;
1055 struct ieee80211_conf *conf = &ar->hw->conf; 1260 struct ieee80211_conf *conf = &ar->hw->conf;
1056 enum wmi_sta_powersave_param param; 1261 enum wmi_sta_powersave_param param;
1057 enum wmi_sta_ps_mode psmode; 1262 enum wmi_sta_ps_mode psmode;
1058 int ret; 1263 int ret;
1264 int ps_timeout;
1059 1265
1060 lockdep_assert_held(&arvif->ar->conf_mutex); 1266 lockdep_assert_held(&arvif->ar->conf_mutex);
1061 1267
1062 if (arvif->vif->type != NL80211_IFTYPE_STATION) 1268 if (arvif->vif->type != NL80211_IFTYPE_STATION)
1063 return 0; 1269 return 0;
1064 1270
1065 if (conf->flags & IEEE80211_CONF_PS) { 1271 if (vif->bss_conf.ps) {
1066 psmode = WMI_STA_PS_MODE_ENABLED; 1272 psmode = WMI_STA_PS_MODE_ENABLED;
1067 param = WMI_STA_PS_PARAM_INACTIVITY_TIME; 1273 param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
1068 1274
1275 ps_timeout = conf->dynamic_ps_timeout;
1276 if (ps_timeout == 0) {
1277 /* Firmware doesn't like 0 */
1278 ps_timeout = ieee80211_tu_to_usec(
1279 vif->bss_conf.beacon_int) / 1000;
1280 }
1281
1069 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 1282 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
1070 conf->dynamic_ps_timeout); 1283 ps_timeout);
1071 if (ret) { 1284 if (ret) {
1072 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n", 1285 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
1073 arvif->vdev_id, ret); 1286 arvif->vdev_id, ret);
@@ -1090,6 +1303,38 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
1090 return 0; 1303 return 0;
1091} 1304}
1092 1305
1306static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
1307{
1308 struct ath10k *ar = arvif->ar;
1309 struct wmi_sta_keepalive_arg arg = {};
1310 int ret;
1311
1312 lockdep_assert_held(&arvif->ar->conf_mutex);
1313
1314 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
1315 return 0;
1316
1317 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
1318 return 0;
1319
1320 /* Some firmware revisions have a bug and ignore the `enabled` field.
1321 * Instead use the interval to disable the keepalive.
1322 */
1323 arg.vdev_id = arvif->vdev_id;
1324 arg.enabled = 1;
1325 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
1326 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
1327
1328 ret = ath10k_wmi_sta_keepalive(ar, &arg);
1329 if (ret) {
1330 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
1331 arvif->vdev_id, ret);
1332 return ret;
1333 }
1334
1335 return 0;
1336}
1337
1093/**********************/ 1338/**********************/
1094/* Station management */ 1339/* Station management */
1095/**********************/ 1340/**********************/
@@ -1358,6 +1603,10 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
1358 return; 1603 return;
1359 1604
1360 arg->peer_flags |= WMI_PEER_VHT; 1605 arg->peer_flags |= WMI_PEER_VHT;
1606
1607 if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
1608 arg->peer_flags |= WMI_PEER_VHT_2G;
1609
1361 arg->peer_vht_caps = vht_cap->cap; 1610 arg->peer_vht_caps = vht_cap->cap;
1362 1611
1363 ampdu_factor = (vht_cap->cap & 1612 ampdu_factor = (vht_cap->cap &
@@ -1409,9 +1658,22 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
1409 if (vif->bss_conf.qos) 1658 if (vif->bss_conf.qos)
1410 arg->peer_flags |= WMI_PEER_QOS; 1659 arg->peer_flags |= WMI_PEER_QOS;
1411 break; 1660 break;
1661 case WMI_VDEV_TYPE_IBSS:
1662 if (sta->wme)
1663 arg->peer_flags |= WMI_PEER_QOS;
1664 break;
1412 default: 1665 default:
1413 break; 1666 break;
1414 } 1667 }
1668
1669 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
1670 sta->addr, !!(arg->peer_flags & WMI_PEER_QOS));
1671}
1672
1673static bool ath10k_mac_sta_has_11g_rates(struct ieee80211_sta *sta)
1674{
1675 /* First 4 rates in ath10k_rates are CCK (11b) rates. */
1676 return sta->supp_rates[IEEE80211_BAND_2GHZ] >> 4;
1415} 1677}
1416 1678
1417static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, 1679static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
@@ -1423,13 +1685,20 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
1423 1685
1424 switch (ar->hw->conf.chandef.chan->band) { 1686 switch (ar->hw->conf.chandef.chan->band) {
1425 case IEEE80211_BAND_2GHZ: 1687 case IEEE80211_BAND_2GHZ:
1426 if (sta->ht_cap.ht_supported) { 1688 if (sta->vht_cap.vht_supported) {
1689 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
1690 phymode = MODE_11AC_VHT40;
1691 else
1692 phymode = MODE_11AC_VHT20;
1693 } else if (sta->ht_cap.ht_supported) {
1427 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 1694 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
1428 phymode = MODE_11NG_HT40; 1695 phymode = MODE_11NG_HT40;
1429 else 1696 else
1430 phymode = MODE_11NG_HT20; 1697 phymode = MODE_11NG_HT20;
1431 } else { 1698 } else if (ath10k_mac_sta_has_11g_rates(sta)) {
1432 phymode = MODE_11G; 1699 phymode = MODE_11G;
1700 } else {
1701 phymode = MODE_11B;
1433 } 1702 }
1434 1703
1435 break; 1704 break;
@@ -1603,7 +1872,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
1603 ath10k_warn(ar, "faield to down vdev %i: %d\n", 1872 ath10k_warn(ar, "faield to down vdev %i: %d\n",
1604 arvif->vdev_id, ret); 1873 arvif->vdev_id, ret);
1605 1874
1606 arvif->def_wep_key_idx = 0; 1875 arvif->def_wep_key_idx = -1;
1876
1607 arvif->is_up = false; 1877 arvif->is_up = false;
1608} 1878}
1609 1879
@@ -1662,11 +1932,14 @@ static int ath10k_station_assoc(struct ath10k *ar,
1662 } 1932 }
1663 } 1933 }
1664 1934
1665 ret = ath10k_install_peer_wep_keys(arvif, sta->addr); 1935 /* Plumb cached keys only for static WEP */
1666 if (ret) { 1936 if (arvif->def_wep_key_idx != -1) {
1667 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", 1937 ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
1668 arvif->vdev_id, ret); 1938 if (ret) {
1669 return ret; 1939 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
1940 arvif->vdev_id, ret);
1941 return ret;
1942 }
1670 } 1943 }
1671 } 1944 }
1672 1945
@@ -1931,75 +2204,13 @@ static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
1931 * used only for CQM purposes (e.g. hostapd station keepalive ping) so 2204 * used only for CQM purposes (e.g. hostapd station keepalive ping) so
1932 * it is safe to downgrade to NullFunc. 2205 * it is safe to downgrade to NullFunc.
1933 */ 2206 */
2207 hdr = (void *)skb->data;
1934 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) { 2208 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1935 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 2209 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1936 cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; 2210 cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1937 } 2211 }
1938} 2212}
1939 2213
1940static void ath10k_tx_wep_key_work(struct work_struct *work)
1941{
1942 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1943 wep_key_work);
1944 struct ath10k *ar = arvif->ar;
1945 int ret, keyidx = arvif->def_wep_key_newidx;
1946
1947 mutex_lock(&arvif->ar->conf_mutex);
1948
1949 if (arvif->ar->state != ATH10K_STATE_ON)
1950 goto unlock;
1951
1952 if (arvif->def_wep_key_idx == keyidx)
1953 goto unlock;
1954
1955 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
1956 arvif->vdev_id, keyidx);
1957
1958 ret = ath10k_wmi_vdev_set_param(arvif->ar,
1959 arvif->vdev_id,
1960 arvif->ar->wmi.vdev_param->def_keyid,
1961 keyidx);
1962 if (ret) {
1963 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
1964 arvif->vdev_id,
1965 ret);
1966 goto unlock;
1967 }
1968
1969 arvif->def_wep_key_idx = keyidx;
1970
1971unlock:
1972 mutex_unlock(&arvif->ar->conf_mutex);
1973}
1974
1975static void ath10k_tx_h_update_wep_key(struct ieee80211_vif *vif,
1976 struct ieee80211_key_conf *key,
1977 struct sk_buff *skb)
1978{
1979 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1980 struct ath10k *ar = arvif->ar;
1981 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1982
1983 if (!ieee80211_has_protected(hdr->frame_control))
1984 return;
1985
1986 if (!key)
1987 return;
1988
1989 if (key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
1990 key->cipher != WLAN_CIPHER_SUITE_WEP104)
1991 return;
1992
1993 if (key->keyidx == arvif->def_wep_key_idx)
1994 return;
1995
1996 /* FIXME: Most likely a few frames will be TXed with an old key. Simply
1997 * queueing frames until key index is updated is not an option because
1998 * sk_buff may need more processing to be done, e.g. offchannel */
1999 arvif->def_wep_key_newidx = key->keyidx;
2000 ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
2001}
2002
2003static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, 2214static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
2004 struct ieee80211_vif *vif, 2215 struct ieee80211_vif *vif,
2005 struct sk_buff *skb) 2216 struct sk_buff *skb)
@@ -2151,7 +2362,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
2151 2362
2152 ret = wait_for_completion_timeout(&ar->offchan_tx_completed, 2363 ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
2153 3 * HZ); 2364 3 * HZ);
2154 if (ret <= 0) 2365 if (ret == 0)
2155 ath10k_warn(ar, "timed out waiting for offchannel skb %p\n", 2366 ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
2156 skb); 2367 skb);
2157 2368
@@ -2213,6 +2424,7 @@ void __ath10k_scan_finish(struct ath10k *ar)
2213 case ATH10K_SCAN_RUNNING: 2424 case ATH10K_SCAN_RUNNING:
2214 if (ar->scan.is_roc) 2425 if (ar->scan.is_roc)
2215 ieee80211_remain_on_channel_expired(ar->hw); 2426 ieee80211_remain_on_channel_expired(ar->hw);
2427 /* fall through */
2216 case ATH10K_SCAN_ABORTING: 2428 case ATH10K_SCAN_ABORTING:
2217 if (!ar->scan.is_roc) 2429 if (!ar->scan.is_roc)
2218 ieee80211_scan_completed(ar->hw, 2430 ieee80211_scan_completed(ar->hw,
@@ -2359,7 +2571,6 @@ static void ath10k_tx(struct ieee80211_hw *hw,
2359 struct ath10k *ar = hw->priv; 2571 struct ath10k *ar = hw->priv;
2360 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2572 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2361 struct ieee80211_vif *vif = info->control.vif; 2573 struct ieee80211_vif *vif = info->control.vif;
2362 struct ieee80211_key_conf *key = info->control.hw_key;
2363 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2574 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2364 2575
2365 /* We should disable CCK RATE due to P2P */ 2576 /* We should disable CCK RATE due to P2P */
@@ -2373,7 +2584,6 @@ static void ath10k_tx(struct ieee80211_hw *hw,
2373 /* it makes no sense to process injected frames like that */ 2584 /* it makes no sense to process injected frames like that */
2374 if (vif && vif->type != NL80211_IFTYPE_MONITOR) { 2585 if (vif && vif->type != NL80211_IFTYPE_MONITOR) {
2375 ath10k_tx_h_nwifi(hw, skb); 2586 ath10k_tx_h_nwifi(hw, skb);
2376 ath10k_tx_h_update_wep_key(vif, key, skb);
2377 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); 2587 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
2378 ath10k_tx_h_seq_no(vif, skb); 2588 ath10k_tx_h_seq_no(vif, skb);
2379 } 2589 }
@@ -2871,6 +3081,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2871 int bit; 3081 int bit;
2872 u32 vdev_param; 3082 u32 vdev_param;
2873 3083
3084 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
3085
2874 mutex_lock(&ar->conf_mutex); 3086 mutex_lock(&ar->conf_mutex);
2875 3087
2876 memset(arvif, 0, sizeof(*arvif)); 3088 memset(arvif, 0, sizeof(*arvif));
@@ -2878,7 +3090,6 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2878 arvif->ar = ar; 3090 arvif->ar = ar;
2879 arvif->vif = vif; 3091 arvif->vif = vif;
2880 3092
2881 INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
2882 INIT_LIST_HEAD(&arvif->list); 3093 INIT_LIST_HEAD(&arvif->list);
2883 3094
2884 if (ar->free_vdev_map == 0) { 3095 if (ar->free_vdev_map == 0) {
@@ -2894,10 +3105,11 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2894 arvif->vdev_id = bit; 3105 arvif->vdev_id = bit;
2895 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; 3106 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
2896 3107
2897 if (ar->p2p)
2898 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
2899
2900 switch (vif->type) { 3108 switch (vif->type) {
3109 case NL80211_IFTYPE_P2P_DEVICE:
3110 arvif->vdev_type = WMI_VDEV_TYPE_STA;
3111 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
3112 break;
2901 case NL80211_IFTYPE_UNSPECIFIED: 3113 case NL80211_IFTYPE_UNSPECIFIED:
2902 case NL80211_IFTYPE_STATION: 3114 case NL80211_IFTYPE_STATION:
2903 arvif->vdev_type = WMI_VDEV_TYPE_STA; 3115 arvif->vdev_type = WMI_VDEV_TYPE_STA;
@@ -2966,15 +3178,18 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
2966 ar->free_vdev_map &= ~(1LL << arvif->vdev_id); 3178 ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
2967 list_add(&arvif->list, &ar->arvifs); 3179 list_add(&arvif->list, &ar->arvifs);
2968 3180
2969 vdev_param = ar->wmi.vdev_param->def_keyid; 3181 /* It makes no sense to have firmware do keepalives. mac80211 already
2970 ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param, 3182 * takes care of this with idle connection polling.
2971 arvif->def_wep_key_idx); 3183 */
3184 ret = ath10k_mac_vif_disable_keepalive(arvif);
2972 if (ret) { 3185 if (ret) {
2973 ath10k_warn(ar, "failed to set vdev %i default key id: %d\n", 3186 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
2974 arvif->vdev_id, ret); 3187 arvif->vdev_id, ret);
2975 goto err_vdev_delete; 3188 goto err_vdev_delete;
2976 } 3189 }
2977 3190
3191 arvif->def_wep_key_idx = -1;
3192
2978 vdev_param = ar->wmi.vdev_param->tx_encap_type; 3193 vdev_param = ar->wmi.vdev_param->tx_encap_type;
2979 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 3194 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2980 ATH10K_HW_TXRX_NATIVE_WIFI); 3195 ATH10K_HW_TXRX_NATIVE_WIFI);
@@ -3026,22 +3241,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
3026 goto err_peer_delete; 3241 goto err_peer_delete;
3027 } 3242 }
3028 3243
3029 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; 3244 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
3030 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
3031 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
3032 param, value);
3033 if (ret) { 3245 if (ret) {
3034 ath10k_warn(ar, "failed to set vdev %i TX wake thresh: %d\n", 3246 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
3035 arvif->vdev_id, ret); 3247 arvif->vdev_id, ret);
3036 goto err_peer_delete; 3248 goto err_peer_delete;
3037 } 3249 }
3038 3250
3039 param = WMI_STA_PS_PARAM_PSPOLL_COUNT; 3251 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
3040 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
3041 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
3042 param, value);
3043 if (ret) { 3252 if (ret) {
3044 ath10k_warn(ar, "failed to set vdev %i PSPOLL count: %d\n", 3253 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
3045 arvif->vdev_id, ret); 3254 arvif->vdev_id, ret);
3046 goto err_peer_delete; 3255 goto err_peer_delete;
3047 } 3256 }
@@ -3099,8 +3308,6 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
3099 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 3308 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3100 int ret; 3309 int ret;
3101 3310
3102 cancel_work_sync(&arvif->wep_key_work);
3103
3104 mutex_lock(&ar->conf_mutex); 3311 mutex_lock(&ar->conf_mutex);
3105 3312
3106 spin_lock_bh(&ar->data_lock); 3313 spin_lock_bh(&ar->data_lock);
@@ -3211,9 +3418,21 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
3211 if (ret) 3418 if (ret)
3212 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", 3419 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
3213 arvif->vdev_id, ret); 3420 arvif->vdev_id, ret);
3421
3422 ret = ath10k_mac_setup_bcn_tmpl(arvif);
3423 if (ret)
3424 ath10k_warn(ar, "failed to update beacon template: %d\n",
3425 ret);
3214 } 3426 }
3215 3427
3216 if (changed & BSS_CHANGED_BEACON_INFO) { 3428 if (changed & BSS_CHANGED_AP_PROBE_RESP) {
3429 ret = ath10k_mac_setup_prb_tmpl(arvif);
3430 if (ret)
3431 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
3432 arvif->vdev_id, ret);
3433 }
3434
3435 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
3217 arvif->dtim_period = info->dtim_period; 3436 arvif->dtim_period = info->dtim_period;
3218 3437
3219 ath10k_dbg(ar, ATH10K_DBG_MAC, 3438 ath10k_dbg(ar, ATH10K_DBG_MAC,
@@ -3314,6 +3533,13 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
3314 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 3533 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
3315 } 3534 }
3316 3535
3536 if (changed & BSS_CHANGED_PS) {
3537 ret = ath10k_mac_vif_setup_ps(arvif);
3538 if (ret)
3539 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
3540 arvif->vdev_id, ret);
3541 }
3542
3317 mutex_unlock(&ar->conf_mutex); 3543 mutex_unlock(&ar->conf_mutex);
3318} 3544}
3319 3545
@@ -3453,6 +3679,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3453 const u8 *peer_addr; 3679 const u8 *peer_addr;
3454 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || 3680 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3455 key->cipher == WLAN_CIPHER_SUITE_WEP104; 3681 key->cipher == WLAN_CIPHER_SUITE_WEP104;
3682 bool def_idx = false;
3456 int ret = 0; 3683 int ret = 0;
3457 3684
3458 if (key->keyidx > WMI_MAX_KEY_INDEX) 3685 if (key->keyidx > WMI_MAX_KEY_INDEX)
@@ -3498,7 +3725,14 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3498 ath10k_clear_vdev_key(arvif, key); 3725 ath10k_clear_vdev_key(arvif, key);
3499 } 3726 }
3500 3727
3501 ret = ath10k_install_key(arvif, key, cmd, peer_addr); 3728 /* set TX_USAGE flag for all the keys incase of dot1x-WEP. For
3729 * static WEP, do not set this flag for the keys whose key id
3730 * is greater than default key id.
3731 */
3732 if (arvif->def_wep_key_idx == -1)
3733 def_idx = true;
3734
3735 ret = ath10k_install_key(arvif, key, cmd, peer_addr, def_idx);
3502 if (ret) { 3736 if (ret) {
3503 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", 3737 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
3504 arvif->vdev_id, peer_addr, ret); 3738 arvif->vdev_id, peer_addr, ret);
@@ -3523,6 +3757,39 @@ exit:
3523 return ret; 3757 return ret;
3524} 3758}
3525 3759
3760static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
3761 struct ieee80211_vif *vif,
3762 int keyidx)
3763{
3764 struct ath10k *ar = hw->priv;
3765 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3766 int ret;
3767
3768 mutex_lock(&arvif->ar->conf_mutex);
3769
3770 if (arvif->ar->state != ATH10K_STATE_ON)
3771 goto unlock;
3772
3773 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
3774 arvif->vdev_id, keyidx);
3775
3776 ret = ath10k_wmi_vdev_set_param(arvif->ar,
3777 arvif->vdev_id,
3778 arvif->ar->wmi.vdev_param->def_keyid,
3779 keyidx);
3780
3781 if (ret) {
3782 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
3783 arvif->vdev_id,
3784 ret);
3785 goto unlock;
3786 }
3787
3788 arvif->def_wep_key_idx = keyidx;
3789unlock:
3790 mutex_unlock(&arvif->ar->conf_mutex);
3791}
3792
3526static void ath10k_sta_rc_update_wk(struct work_struct *wk) 3793static void ath10k_sta_rc_update_wk(struct work_struct *wk)
3527{ 3794{
3528 struct ath10k *ar; 3795 struct ath10k *ar;
@@ -3583,8 +3850,9 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
3583 sta->addr, smps, err); 3850 sta->addr, smps, err);
3584 } 3851 }
3585 3852
3586 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { 3853 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
3587 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", 3854 changed & IEEE80211_RC_NSS_CHANGED) {
3855 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
3588 sta->addr); 3856 sta->addr);
3589 3857
3590 err = ath10k_station_assoc(ar, arvif->vif, sta, true); 3858 err = ath10k_station_assoc(ar, arvif->vif, sta, true);
@@ -3757,6 +4025,8 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
3757 u16 ac, bool enable) 4025 u16 ac, bool enable)
3758{ 4026{
3759 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 4027 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
4028 struct wmi_sta_uapsd_auto_trig_arg arg = {};
4029 u32 prio = 0, acc = 0;
3760 u32 value = 0; 4030 u32 value = 0;
3761 int ret = 0; 4031 int ret = 0;
3762 4032
@@ -3769,18 +4039,26 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
3769 case IEEE80211_AC_VO: 4039 case IEEE80211_AC_VO:
3770 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | 4040 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
3771 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; 4041 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
4042 prio = 7;
4043 acc = 3;
3772 break; 4044 break;
3773 case IEEE80211_AC_VI: 4045 case IEEE80211_AC_VI:
3774 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | 4046 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
3775 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; 4047 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
4048 prio = 5;
4049 acc = 2;
3776 break; 4050 break;
3777 case IEEE80211_AC_BE: 4051 case IEEE80211_AC_BE:
3778 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | 4052 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
3779 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; 4053 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
4054 prio = 2;
4055 acc = 1;
3780 break; 4056 break;
3781 case IEEE80211_AC_BK: 4057 case IEEE80211_AC_BK:
3782 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | 4058 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
3783 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; 4059 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
4060 prio = 0;
4061 acc = 0;
3784 break; 4062 break;
3785 } 4063 }
3786 4064
@@ -3808,6 +4086,43 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
3808 if (ret) 4086 if (ret)
3809 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); 4087 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
3810 4088
4089 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
4090 if (ret) {
4091 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
4092 arvif->vdev_id, ret);
4093 return ret;
4094 }
4095
4096 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
4097 if (ret) {
4098 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
4099 arvif->vdev_id, ret);
4100 return ret;
4101 }
4102
4103 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
4104 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
4105 /* Only userspace can make an educated decision when to send
4106 * trigger frame. The following effectively disables u-UAPSD
4107 * autotrigger in firmware (which is enabled by default
4108 * provided the autotrigger service is available).
4109 */
4110
4111 arg.wmm_ac = acc;
4112 arg.user_priority = prio;
4113 arg.service_interval = 0;
4114 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
4115 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
4116
4117 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
4118 arvif->bssid, &arg, 1);
4119 if (ret) {
4120 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
4121 ret);
4122 return ret;
4123 }
4124 }
4125
3811exit: 4126exit:
3812 return ret; 4127 return ret;
3813} 4128}
@@ -3817,6 +4132,7 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
3817 const struct ieee80211_tx_queue_params *params) 4132 const struct ieee80211_tx_queue_params *params)
3818{ 4133{
3819 struct ath10k *ar = hw->priv; 4134 struct ath10k *ar = hw->priv;
4135 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3820 struct wmi_wmm_params_arg *p = NULL; 4136 struct wmi_wmm_params_arg *p = NULL;
3821 int ret; 4137 int ret;
3822 4138
@@ -3824,16 +4140,16 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
3824 4140
3825 switch (ac) { 4141 switch (ac) {
3826 case IEEE80211_AC_VO: 4142 case IEEE80211_AC_VO:
3827 p = &ar->wmm_params.ac_vo; 4143 p = &arvif->wmm_params.ac_vo;
3828 break; 4144 break;
3829 case IEEE80211_AC_VI: 4145 case IEEE80211_AC_VI:
3830 p = &ar->wmm_params.ac_vi; 4146 p = &arvif->wmm_params.ac_vi;
3831 break; 4147 break;
3832 case IEEE80211_AC_BE: 4148 case IEEE80211_AC_BE:
3833 p = &ar->wmm_params.ac_be; 4149 p = &arvif->wmm_params.ac_be;
3834 break; 4150 break;
3835 case IEEE80211_AC_BK: 4151 case IEEE80211_AC_BK:
3836 p = &ar->wmm_params.ac_bk; 4152 p = &arvif->wmm_params.ac_bk;
3837 break; 4153 break;
3838 } 4154 }
3839 4155
@@ -3853,11 +4169,23 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
3853 */ 4169 */
3854 p->txop = params->txop * 32; 4170 p->txop = params->txop * 32;
3855 4171
3856 /* FIXME: FW accepts wmm params per hw, not per vif */ 4172 if (ar->wmi.ops->gen_vdev_wmm_conf) {
3857 ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params); 4173 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
3858 if (ret) { 4174 &arvif->wmm_params);
3859 ath10k_warn(ar, "failed to set wmm params: %d\n", ret); 4175 if (ret) {
3860 goto exit; 4176 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
4177 arvif->vdev_id, ret);
4178 goto exit;
4179 }
4180 } else {
4181 /* This won't work well with multi-interface cases but it's
4182 * better than nothing.
4183 */
4184 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
4185 if (ret) {
4186 ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
4187 goto exit;
4188 }
3861 } 4189 }
3862 4190
3863 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); 4191 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
@@ -3989,29 +4317,6 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3989 return ret; 4317 return ret;
3990} 4318}
3991 4319
3992static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3993{
3994 struct ath10k *ar = hw->priv;
3995 struct ath10k_vif *arvif;
3996 int ret = 0;
3997
3998 mutex_lock(&ar->conf_mutex);
3999 list_for_each_entry(arvif, &ar->arvifs, list) {
4000 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
4001 arvif->vdev_id, value);
4002
4003 ret = ath10k_mac_set_frag(arvif, value);
4004 if (ret) {
4005 ath10k_warn(ar, "failed to set fragmentation threshold for vdev %d: %d\n",
4006 arvif->vdev_id, ret);
4007 break;
4008 }
4009 }
4010 mutex_unlock(&ar->conf_mutex);
4011
4012 return ret;
4013}
4014
4015static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 4320static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4016 u32 queues, bool drop) 4321 u32 queues, bool drop)
4017{ 4322{
@@ -4650,12 +4955,12 @@ static const struct ieee80211_ops ath10k_ops = {
4650 .hw_scan = ath10k_hw_scan, 4955 .hw_scan = ath10k_hw_scan,
4651 .cancel_hw_scan = ath10k_cancel_hw_scan, 4956 .cancel_hw_scan = ath10k_cancel_hw_scan,
4652 .set_key = ath10k_set_key, 4957 .set_key = ath10k_set_key,
4958 .set_default_unicast_key = ath10k_set_default_unicast_key,
4653 .sta_state = ath10k_sta_state, 4959 .sta_state = ath10k_sta_state,
4654 .conf_tx = ath10k_conf_tx, 4960 .conf_tx = ath10k_conf_tx,
4655 .remain_on_channel = ath10k_remain_on_channel, 4961 .remain_on_channel = ath10k_remain_on_channel,
4656 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, 4962 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
4657 .set_rts_threshold = ath10k_set_rts_threshold, 4963 .set_rts_threshold = ath10k_set_rts_threshold,
4658 .set_frag_threshold = ath10k_set_frag_threshold,
4659 .flush = ath10k_flush, 4964 .flush = ath10k_flush,
4660 .tx_last_beacon = ath10k_tx_last_beacon, 4965 .tx_last_beacon = ath10k_tx_last_beacon,
4661 .set_antenna = ath10k_set_antenna, 4966 .set_antenna = ath10k_set_antenna,
@@ -4676,6 +4981,9 @@ static const struct ieee80211_ops ath10k_ops = {
4676 .suspend = ath10k_suspend, 4981 .suspend = ath10k_suspend,
4677 .resume = ath10k_resume, 4982 .resume = ath10k_resume,
4678#endif 4983#endif
4984#ifdef CONFIG_MAC80211_DEBUGFS
4985 .sta_add_debugfs = ath10k_sta_add_debugfs,
4986#endif
4679}; 4987};
4680 4988
4681#define RATETAB_ENT(_rate, _rateid, _flags) { \ 4989#define RATETAB_ENT(_rate, _rateid, _flags) { \
@@ -4746,6 +5054,9 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
4746 CHAN5G(165, 5825, 0), 5054 CHAN5G(165, 5825, 0),
4747}; 5055};
4748 5056
5057/* Note: Be careful if you re-order these. There is code which depends on this
5058 * ordering.
5059 */
4749static struct ieee80211_rate ath10k_rates[] = { 5060static struct ieee80211_rate ath10k_rates[] = {
4750 /* CCK */ 5061 /* CCK */
4751 RATETAB_ENT(10, 0x82, 0), 5062 RATETAB_ENT(10, 0x82, 0),
@@ -4799,6 +5110,10 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = {
4799 .types = BIT(NL80211_IFTYPE_P2P_GO) 5110 .types = BIT(NL80211_IFTYPE_P2P_GO)
4800 }, 5111 },
4801 { 5112 {
5113 .max = 1,
5114 .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
5115 },
5116 {
4802 .max = 7, 5117 .max = 7,
4803 .types = BIT(NL80211_IFTYPE_AP) 5118 .types = BIT(NL80211_IFTYPE_AP)
4804 }, 5119 },
@@ -4956,6 +5271,13 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
4956 5271
4957int ath10k_mac_register(struct ath10k *ar) 5272int ath10k_mac_register(struct ath10k *ar)
4958{ 5273{
5274 static const u32 cipher_suites[] = {
5275 WLAN_CIPHER_SUITE_WEP40,
5276 WLAN_CIPHER_SUITE_WEP104,
5277 WLAN_CIPHER_SUITE_TKIP,
5278 WLAN_CIPHER_SUITE_CCMP,
5279 WLAN_CIPHER_SUITE_AES_CMAC,
5280 };
4959 struct ieee80211_supported_band *band; 5281 struct ieee80211_supported_band *band;
4960 struct ieee80211_sta_vht_cap vht_cap; 5282 struct ieee80211_sta_vht_cap vht_cap;
4961 struct ieee80211_sta_ht_cap ht_cap; 5283 struct ieee80211_sta_ht_cap ht_cap;
@@ -4985,7 +5307,8 @@ int ath10k_mac_register(struct ath10k *ar)
4985 band->bitrates = ath10k_g_rates; 5307 band->bitrates = ath10k_g_rates;
4986 band->ht_cap = ht_cap; 5308 band->ht_cap = ht_cap;
4987 5309
4988 /* vht is not supported in 2.4 GHz */ 5310 /* Enable the VHT support at 2.4 GHz */
5311 band->vht_cap = vht_cap;
4989 5312
4990 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band; 5313 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
4991 } 5314 }
@@ -5018,18 +5341,19 @@ int ath10k_mac_register(struct ath10k *ar)
5018 5341
5019 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features)) 5342 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
5020 ar->hw->wiphy->interface_modes |= 5343 ar->hw->wiphy->interface_modes |=
5344 BIT(NL80211_IFTYPE_P2P_DEVICE) |
5021 BIT(NL80211_IFTYPE_P2P_CLIENT) | 5345 BIT(NL80211_IFTYPE_P2P_CLIENT) |
5022 BIT(NL80211_IFTYPE_P2P_GO); 5346 BIT(NL80211_IFTYPE_P2P_GO);
5023 5347
5024 ar->hw->flags = IEEE80211_HW_SIGNAL_DBM | 5348 ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5025 IEEE80211_HW_SUPPORTS_PS | 5349 IEEE80211_HW_SUPPORTS_PS |
5026 IEEE80211_HW_SUPPORTS_DYNAMIC_PS | 5350 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5027 IEEE80211_HW_SUPPORTS_UAPSD |
5028 IEEE80211_HW_MFP_CAPABLE | 5351 IEEE80211_HW_MFP_CAPABLE |
5029 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 5352 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5030 IEEE80211_HW_HAS_RATE_CONTROL | 5353 IEEE80211_HW_HAS_RATE_CONTROL |
5031 IEEE80211_HW_AP_LINK_PS | 5354 IEEE80211_HW_AP_LINK_PS |
5032 IEEE80211_HW_SPECTRUM_MGMT; 5355 IEEE80211_HW_SPECTRUM_MGMT |
5356 IEEE80211_HW_SW_CRYPTO_CONTROL;
5033 5357
5034 ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; 5358 ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
5035 5359
@@ -5049,6 +5373,19 @@ int ath10k_mac_register(struct ath10k *ar)
5049 5373
5050 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; 5374 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
5051 5375
5376 if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
5377 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5378
5379 /* Firmware delivers WPS/P2P Probe Requests frames to driver so
5380 * that userspace (e.g. wpa_supplicant/hostapd) can generate
5381 * correct Probe Responses. This is more of a hack advert..
5382 */
5383 ar->hw->wiphy->probe_resp_offload |=
5384 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5385 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5386 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5387 }
5388
5052 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 5389 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5053 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; 5390 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
5054 ar->hw->wiphy->max_remain_on_channel_duration = 5000; 5391 ar->hw->wiphy->max_remain_on_channel_duration = 5000;
@@ -5062,16 +5399,26 @@ int ath10k_mac_register(struct ath10k *ar)
5062 */ 5399 */
5063 ar->hw->queues = 4; 5400 ar->hw->queues = 4;
5064 5401
5065 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { 5402 switch (ar->wmi.op_version) {
5066 ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb; 5403 case ATH10K_FW_WMI_OP_VERSION_MAIN:
5067 ar->hw->wiphy->n_iface_combinations = 5404 case ATH10K_FW_WMI_OP_VERSION_TLV:
5068 ARRAY_SIZE(ath10k_10x_if_comb);
5069 } else {
5070 ar->hw->wiphy->iface_combinations = ath10k_if_comb; 5405 ar->hw->wiphy->iface_combinations = ath10k_if_comb;
5071 ar->hw->wiphy->n_iface_combinations = 5406 ar->hw->wiphy->n_iface_combinations =
5072 ARRAY_SIZE(ath10k_if_comb); 5407 ARRAY_SIZE(ath10k_if_comb);
5073
5074 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); 5408 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
5409 break;
5410 case ATH10K_FW_WMI_OP_VERSION_10_1:
5411 case ATH10K_FW_WMI_OP_VERSION_10_2:
5412 case ATH10K_FW_WMI_OP_VERSION_10_2_4:
5413 ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
5414 ar->hw->wiphy->n_iface_combinations =
5415 ARRAY_SIZE(ath10k_10x_if_comb);
5416 break;
5417 case ATH10K_FW_WMI_OP_VERSION_UNSET:
5418 case ATH10K_FW_WMI_OP_VERSION_MAX:
5419 WARN_ON(1);
5420 ret = -EINVAL;
5421 goto err_free;
5075 } 5422 }
5076 5423
5077 ar->hw->netdev_features = NETIF_F_HW_CSUM; 5424 ar->hw->netdev_features = NETIF_F_HW_CSUM;
@@ -5093,6 +5440,9 @@ int ath10k_mac_register(struct ath10k *ar)
5093 goto err_free; 5440 goto err_free;
5094 } 5441 }
5095 5442
5443 ar->hw->wiphy->cipher_suites = cipher_suites;
5444 ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5445
5096 ret = ieee80211_register_hw(ar->hw); 5446 ret = ieee80211_register_hw(ar->hw);
5097 if (ret) { 5447 if (ret) {
5098 ath10k_err(ar, "failed to register ieee80211: %d\n", ret); 5448 ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 7abb8367119a..e6972b09333e 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -58,12 +58,27 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
58#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 58#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
59 59
60#define QCA988X_2_0_DEVICE_ID (0x003c) 60#define QCA988X_2_0_DEVICE_ID (0x003c)
61#define QCA6174_2_1_DEVICE_ID (0x003e)
61 62
62static const struct pci_device_id ath10k_pci_id_table[] = { 63static const struct pci_device_id ath10k_pci_id_table[] = {
63 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ 64 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
65 { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
64 {0} 66 {0}
65}; 67};
66 68
69static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
70 /* QCA988X pre 2.0 chips are not supported because they need some nasty
71 * hacks. ath10k doesn't have them and these devices crash horribly
72 * because of that.
73 */
74 { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
75 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
76 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
77 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
78 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
79 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
80};
81
67static void ath10k_pci_buffer_cleanup(struct ath10k *ar); 82static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
68static int ath10k_pci_cold_reset(struct ath10k *ar); 83static int ath10k_pci_cold_reset(struct ath10k *ar);
69static int ath10k_pci_warm_reset(struct ath10k *ar); 84static int ath10k_pci_warm_reset(struct ath10k *ar);
@@ -395,7 +410,7 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
395 return -EIO; 410 return -EIO;
396 } 411 }
397 412
398 ATH10K_SKB_CB(skb)->paddr = paddr; 413 ATH10K_SKB_RXCB(skb)->paddr = paddr;
399 414
400 ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); 415 ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
401 if (ret) { 416 if (ret) {
@@ -864,7 +879,7 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
864 &flags) == 0) { 879 &flags) == 0) {
865 skb = transfer_context; 880 skb = transfer_context;
866 max_nbytes = skb->len + skb_tailroom(skb); 881 max_nbytes = skb->len + skb_tailroom(skb);
867 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, 882 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
868 max_nbytes, DMA_FROM_DEVICE); 883 max_nbytes, DMA_FROM_DEVICE);
869 884
870 if (unlikely(max_nbytes < nbytes)) { 885 if (unlikely(max_nbytes < nbytes)) {
@@ -1230,7 +1245,7 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1230 1245
1231 ce_ring->per_transfer_context[i] = NULL; 1246 ce_ring->per_transfer_context[i] = NULL;
1232 1247
1233 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, 1248 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1234 skb->len + skb_tailroom(skb), 1249 skb->len + skb_tailroom(skb),
1235 DMA_FROM_DEVICE); 1250 DMA_FROM_DEVICE);
1236 dev_kfree_skb_any(skb); 1251 dev_kfree_skb_any(skb);
@@ -1498,6 +1513,35 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1498 return 0; 1513 return 0;
1499} 1514}
1500 1515
1516static int ath10k_pci_get_num_banks(struct ath10k *ar)
1517{
1518 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1519
1520 switch (ar_pci->pdev->device) {
1521 case QCA988X_2_0_DEVICE_ID:
1522 return 1;
1523 case QCA6174_2_1_DEVICE_ID:
1524 switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
1525 case QCA6174_HW_1_0_CHIP_ID_REV:
1526 case QCA6174_HW_1_1_CHIP_ID_REV:
1527 return 3;
1528 case QCA6174_HW_1_3_CHIP_ID_REV:
1529 return 2;
1530 case QCA6174_HW_2_1_CHIP_ID_REV:
1531 case QCA6174_HW_2_2_CHIP_ID_REV:
1532 return 6;
1533 case QCA6174_HW_3_0_CHIP_ID_REV:
1534 case QCA6174_HW_3_1_CHIP_ID_REV:
1535 case QCA6174_HW_3_2_CHIP_ID_REV:
1536 return 9;
1537 }
1538 break;
1539 }
1540
1541 ath10k_warn(ar, "unknown number of banks, assuming 1\n");
1542 return 1;
1543}
1544
1501static int ath10k_pci_init_config(struct ath10k *ar) 1545static int ath10k_pci_init_config(struct ath10k *ar)
1502{ 1546{
1503 u32 interconnect_targ_addr; 1547 u32 interconnect_targ_addr;
@@ -1608,7 +1652,8 @@ static int ath10k_pci_init_config(struct ath10k *ar)
1608 /* first bank is switched to IRAM */ 1652 /* first bank is switched to IRAM */
1609 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & 1653 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1610 HI_EARLY_ALLOC_MAGIC_MASK); 1654 HI_EARLY_ALLOC_MAGIC_MASK);
1611 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & 1655 ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
1656 HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1612 HI_EARLY_ALLOC_IRAM_BANKS_MASK); 1657 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1613 1658
1614 ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value); 1659 ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
@@ -1804,12 +1849,12 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
1804 return 0; 1849 return 0;
1805} 1850}
1806 1851
1807static int ath10k_pci_chip_reset(struct ath10k *ar) 1852static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
1808{ 1853{
1809 int i, ret; 1854 int i, ret;
1810 u32 val; 1855 u32 val;
1811 1856
1812 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset\n"); 1857 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
1813 1858
1814 /* Some hardware revisions (e.g. CUS223v2) has issues with cold reset. 1859 /* Some hardware revisions (e.g. CUS223v2) has issues with cold reset.
1815 * It is thus preferred to use warm reset which is safer but may not be 1860 * It is thus preferred to use warm reset which is safer but may not be
@@ -1873,11 +1918,53 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
1873 return ret; 1918 return ret;
1874 } 1919 }
1875 1920
1876 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (cold)\n"); 1921 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
1922
1923 return 0;
1924}
1925
1926static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
1927{
1928 int ret;
1929
1930 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
1931
1932 /* FIXME: QCA6174 requires cold + warm reset to work. */
1933
1934 ret = ath10k_pci_cold_reset(ar);
1935 if (ret) {
1936 ath10k_warn(ar, "failed to cold reset: %d\n", ret);
1937 return ret;
1938 }
1939
1940 ret = ath10k_pci_wait_for_target_init(ar);
1941 if (ret) {
1942 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
1943 ret);
1944 return ret;
1945 }
1946
1947 ret = ath10k_pci_warm_reset(ar);
1948 if (ret) {
1949 ath10k_warn(ar, "failed to warm reset: %d\n", ret);
1950 return ret;
1951 }
1952
1953 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
1877 1954
1878 return 0; 1955 return 0;
1879} 1956}
1880 1957
1958static int ath10k_pci_chip_reset(struct ath10k *ar)
1959{
1960 if (QCA_REV_988X(ar))
1961 return ath10k_pci_qca988x_chip_reset(ar);
1962 else if (QCA_REV_6174(ar))
1963 return ath10k_pci_qca6174_chip_reset(ar);
1964 else
1965 return -ENOTSUPP;
1966}
1967
1881static int ath10k_pci_hif_power_up(struct ath10k *ar) 1968static int ath10k_pci_hif_power_up(struct ath10k *ar)
1882{ 1969{
1883 int ret; 1970 int ret;
@@ -1902,6 +1989,12 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
1902 */ 1989 */
1903 ret = ath10k_pci_chip_reset(ar); 1990 ret = ath10k_pci_chip_reset(ar);
1904 if (ret) { 1991 if (ret) {
1992 if (ath10k_pci_has_fw_crashed(ar)) {
1993 ath10k_warn(ar, "firmware crashed during chip reset\n");
1994 ath10k_pci_fw_crashed_clear(ar);
1995 ath10k_pci_fw_crashed_dump(ar);
1996 }
1997
1905 ath10k_err(ar, "failed to reset chip: %d\n", ret); 1998 ath10k_err(ar, "failed to reset chip: %d\n", ret);
1906 goto err_sleep; 1999 goto err_sleep;
1907 } 2000 }
@@ -2033,6 +2126,7 @@ static void ath10k_msi_err_tasklet(unsigned long data)
2033 return; 2126 return;
2034 } 2127 }
2035 2128
2129 ath10k_pci_irq_disable(ar);
2036 ath10k_pci_fw_crashed_clear(ar); 2130 ath10k_pci_fw_crashed_clear(ar);
2037 ath10k_pci_fw_crashed_dump(ar); 2131 ath10k_pci_fw_crashed_dump(ar);
2038} 2132}
@@ -2102,6 +2196,7 @@ static void ath10k_pci_tasklet(unsigned long data)
2102 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2196 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2103 2197
2104 if (ath10k_pci_has_fw_crashed(ar)) { 2198 if (ath10k_pci_has_fw_crashed(ar)) {
2199 ath10k_pci_irq_disable(ar);
2105 ath10k_pci_fw_crashed_clear(ar); 2200 ath10k_pci_fw_crashed_clear(ar);
2106 ath10k_pci_fw_crashed_dump(ar); 2201 ath10k_pci_fw_crashed_dump(ar);
2107 return; 2202 return;
@@ -2344,8 +2439,6 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2344 2439
2345 if (val & FW_IND_EVENT_PENDING) { 2440 if (val & FW_IND_EVENT_PENDING) {
2346 ath10k_warn(ar, "device has crashed during init\n"); 2441 ath10k_warn(ar, "device has crashed during init\n");
2347 ath10k_pci_fw_crashed_clear(ar);
2348 ath10k_pci_fw_crashed_dump(ar);
2349 return -ECOMM; 2442 return -ECOMM;
2350 } 2443 }
2351 2444
@@ -2476,17 +2569,46 @@ static void ath10k_pci_release(struct ath10k *ar)
2476 pci_disable_device(pdev); 2569 pci_disable_device(pdev);
2477} 2570}
2478 2571
2572static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
2573{
2574 const struct ath10k_pci_supp_chip *supp_chip;
2575 int i;
2576 u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
2577
2578 for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
2579 supp_chip = &ath10k_pci_supp_chips[i];
2580
2581 if (supp_chip->dev_id == dev_id &&
2582 supp_chip->rev_id == rev_id)
2583 return true;
2584 }
2585
2586 return false;
2587}
2588
2479static int ath10k_pci_probe(struct pci_dev *pdev, 2589static int ath10k_pci_probe(struct pci_dev *pdev,
2480 const struct pci_device_id *pci_dev) 2590 const struct pci_device_id *pci_dev)
2481{ 2591{
2482 int ret = 0; 2592 int ret = 0;
2483 struct ath10k *ar; 2593 struct ath10k *ar;
2484 struct ath10k_pci *ar_pci; 2594 struct ath10k_pci *ar_pci;
2595 enum ath10k_hw_rev hw_rev;
2485 u32 chip_id; 2596 u32 chip_id;
2486 2597
2487 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, 2598 switch (pci_dev->device) {
2488 ATH10K_BUS_PCI, 2599 case QCA988X_2_0_DEVICE_ID:
2489 &ath10k_pci_hif_ops); 2600 hw_rev = ATH10K_HW_QCA988X;
2601 break;
2602 case QCA6174_2_1_DEVICE_ID:
2603 hw_rev = ATH10K_HW_QCA6174;
2604 break;
2605 default:
2606 WARN_ON(1);
2607 return -ENOTSUPP;
2608 }
2609
2610 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
2611 hw_rev, &ath10k_pci_hif_ops);
2490 if (!ar) { 2612 if (!ar) {
2491 dev_err(&pdev->dev, "failed to allocate core\n"); 2613 dev_err(&pdev->dev, "failed to allocate core\n");
2492 return -ENOMEM; 2614 return -ENOMEM;
@@ -2515,12 +2637,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2515 goto err_release; 2637 goto err_release;
2516 } 2638 }
2517 2639
2518 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2519 if (chip_id == 0xffffffff) {
2520 ath10k_err(ar, "failed to get chip id\n");
2521 goto err_sleep;
2522 }
2523
2524 ret = ath10k_pci_alloc_pipes(ar); 2640 ret = ath10k_pci_alloc_pipes(ar);
2525 if (ret) { 2641 if (ret) {
2526 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", 2642 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
@@ -2547,6 +2663,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
2547 goto err_deinit_irq; 2663 goto err_deinit_irq;
2548 } 2664 }
2549 2665
2666 ret = ath10k_pci_chip_reset(ar);
2667 if (ret) {
2668 ath10k_err(ar, "failed to reset chip: %d\n", ret);
2669 goto err_free_irq;
2670 }
2671
2672 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2673 if (chip_id == 0xffffffff) {
2674 ath10k_err(ar, "failed to get chip id\n");
2675 goto err_free_irq;
2676 }
2677
2678 if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
2679 ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
2680 pdev->device, chip_id);
2681 goto err_sleep;
2682 }
2683
2550 ath10k_pci_sleep(ar); 2684 ath10k_pci_sleep(ar);
2551 2685
2552 ret = ath10k_core_register(ar, chip_id); 2686 ret = ath10k_core_register(ar, chip_id);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index cf36511c7f4d..bddf54320160 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -152,6 +152,11 @@ struct ath10k_pci_pipe {
152 struct tasklet_struct intr; 152 struct tasklet_struct intr;
153}; 153};
154 154
155struct ath10k_pci_supp_chip {
156 u32 dev_id;
157 u32 rev_id;
158};
159
155struct ath10k_pci { 160struct ath10k_pci {
156 struct pci_dev *pdev; 161 struct pci_dev *pdev;
157 struct device *dev; 162 struct device *dev;
@@ -189,7 +194,7 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
189 194
190#define ATH10K_PCI_RX_POST_RETRY_MS 50 195#define ATH10K_PCI_RX_POST_RETRY_MS 50
191#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */ 196#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
192#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */ 197#define PCIE_WAKE_TIMEOUT 10000 /* 10ms */
193 198
194#define BAR_NUM 0 199#define BAR_NUM 0
195 200
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index e1ffdd57a18c..e9cc7787bf5f 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -850,7 +850,7 @@ struct rx_ppdu_start {
850 850
851#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15) 851#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
852 852
853struct rx_ppdu_end { 853struct rx_ppdu_end_common {
854 __le32 evm_p0; 854 __le32 evm_p0;
855 __le32 evm_p1; 855 __le32 evm_p1;
856 __le32 evm_p2; 856 __le32 evm_p2;
@@ -873,10 +873,33 @@ struct rx_ppdu_end {
873 u8 phy_err_code; 873 u8 phy_err_code;
874 __le16 flags; /* %RX_PPDU_END_FLAGS_ */ 874 __le16 flags; /* %RX_PPDU_END_FLAGS_ */
875 __le32 info0; /* %RX_PPDU_END_INFO0_ */ 875 __le32 info0; /* %RX_PPDU_END_INFO0_ */
876} __packed;
877
878struct rx_ppdu_end_qca988x {
876 __le16 bb_length; 879 __le16 bb_length;
877 __le16 info1; /* %RX_PPDU_END_INFO1_ */ 880 __le16 info1; /* %RX_PPDU_END_INFO1_ */
878} __packed; 881} __packed;
879 882
883#define RX_PPDU_END_RTT_CORRELATION_VALUE_MASK 0x00ffffff
884#define RX_PPDU_END_RTT_CORRELATION_VALUE_LSB 0
885#define RX_PPDU_END_RTT_UNUSED_MASK 0x7f000000
886#define RX_PPDU_END_RTT_UNUSED_LSB 24
887#define RX_PPDU_END_RTT_NORMAL_MODE BIT(31)
888
889struct rx_ppdu_end_qca6174 {
890 __le32 rtt; /* %RX_PPDU_END_RTT_ */
891 __le16 bb_length;
892 __le16 info1; /* %RX_PPDU_END_INFO1_ */
893} __packed;
894
895struct rx_ppdu_end {
896 struct rx_ppdu_end_common common;
897 union {
898 struct rx_ppdu_end_qca988x qca988x;
899 struct rx_ppdu_end_qca6174 qca6174;
900 } __packed;
901} __packed;
902
880/* 903/*
881 * evm_p0 904 * evm_p0
882 * EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3. 905 * EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3.
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 63ce61fcdac8..d22addf6118b 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -17,6 +17,7 @@
17#include <linux/relay.h> 17#include <linux/relay.h>
18#include "core.h" 18#include "core.h"
19#include "debug.h" 19#include "debug.h"
20#include "wmi-ops.h"
20 21
21static void send_fft_sample(struct ath10k *ar, 22static void send_fft_sample(struct ath10k *ar,
22 const struct fft_sample_tlv *fft_sample_tlv) 23 const struct fft_sample_tlv *fft_sample_tlv)
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index 9d0ae30f9ff1..a417aae52623 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -18,6 +18,8 @@
18#ifndef __TARGADDRS_H__ 18#ifndef __TARGADDRS_H__
19#define __TARGADDRS_H__ 19#define __TARGADDRS_H__
20 20
21#include "hw.h"
22
21/* 23/*
22 * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the 24 * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
23 * host_interest structure. It must match the address of the _host_interest 25 * host_interest structure. It must match the address of the _host_interest
@@ -445,4 +447,7 @@ Fw Mode/SubMode Mask
445#define QCA988X_BOARD_DATA_SZ 7168 447#define QCA988X_BOARD_DATA_SZ 7168
446#define QCA988X_BOARD_EXT_DATA_SZ 0 448#define QCA988X_BOARD_EXT_DATA_SZ 0
447 449
450#define QCA6174_BOARD_DATA_SZ 8192
451#define QCA6174_BOARD_EXT_DATA_SZ 0
452
448#endif /* __TARGADDRS_H__ */ 453#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 483db9cb8c96..b084f88da102 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -187,13 +187,14 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
187 187
188 memcpy(ar->testmode.orig_fw_features, ar->fw_features, 188 memcpy(ar->testmode.orig_fw_features, ar->fw_features,
189 sizeof(ar->fw_features)); 189 sizeof(ar->fw_features));
190 ar->testmode.orig_wmi_op_version = ar->wmi.op_version;
190 191
191 /* utf.bin firmware image does not advertise firmware features. Do 192 /* utf.bin firmware image does not advertise firmware features. Do
192 * an ugly hack where we force the firmware features so that wmi.c 193 * an ugly hack where we force the firmware features so that wmi.c
193 * will use the correct WMI interface. 194 * will use the correct WMI interface.
194 */ 195 */
195 memset(ar->fw_features, 0, sizeof(ar->fw_features)); 196 memset(ar->fw_features, 0, sizeof(ar->fw_features));
196 __set_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features); 197 ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
197 198
198 ret = ath10k_hif_power_up(ar); 199 ret = ath10k_hif_power_up(ar);
199 if (ret) { 200 if (ret) {
@@ -224,6 +225,7 @@ err_fw_features:
224 /* return the original firmware features */ 225 /* return the original firmware features */
225 memcpy(ar->fw_features, ar->testmode.orig_fw_features, 226 memcpy(ar->fw_features, ar->testmode.orig_fw_features,
226 sizeof(ar->fw_features)); 227 sizeof(ar->fw_features));
228 ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
227 229
228 release_firmware(ar->testmode.utf); 230 release_firmware(ar->testmode.utf);
229 ar->testmode.utf = NULL; 231 ar->testmode.utf = NULL;
@@ -250,6 +252,7 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
250 /* return the original firmware features */ 252 /* return the original firmware features */
251 memcpy(ar->fw_features, ar->testmode.orig_fw_features, 253 memcpy(ar->fw_features, ar->testmode.orig_fw_features,
252 sizeof(ar->fw_features)); 254 sizeof(ar->fw_features));
255 ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
253 256
254 release_firmware(ar->testmode.utf); 257 release_firmware(ar->testmode.utf);
255 ar->testmode.utf = NULL; 258 ar->testmode.utf = NULL;
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
new file mode 100644
index 000000000000..aede750809fe
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -0,0 +1,244 @@
1/*
2 * Copyright (c) 2014 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/device.h>
18#include <linux/sysfs.h>
19#include <linux/thermal.h>
20#include <linux/hwmon.h>
21#include <linux/hwmon-sysfs.h>
22#include "core.h"
23#include "debug.h"
24#include "wmi-ops.h"
25
26static int ath10k_thermal_get_active_vifs(struct ath10k *ar,
27 enum wmi_vdev_type type)
28{
29 struct ath10k_vif *arvif;
30 int count = 0;
31
32 lockdep_assert_held(&ar->conf_mutex);
33
34 list_for_each_entry(arvif, &ar->arvifs, list) {
35 if (!arvif->is_started)
36 continue;
37
38 if (!arvif->is_up)
39 continue;
40
41 if (arvif->vdev_type != type)
42 continue;
43
44 count++;
45 }
46 return count;
47}
48
49static int ath10k_thermal_get_max_dutycycle(struct thermal_cooling_device *cdev,
50 unsigned long *state)
51{
52 *state = ATH10K_QUIET_DUTY_CYCLE_MAX;
53
54 return 0;
55}
56
57static int ath10k_thermal_get_cur_dutycycle(struct thermal_cooling_device *cdev,
58 unsigned long *state)
59{
60 struct ath10k *ar = cdev->devdata;
61
62 mutex_lock(&ar->conf_mutex);
63 *state = ar->thermal.duty_cycle;
64 mutex_unlock(&ar->conf_mutex);
65
66 return 0;
67}
68
69static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
70 unsigned long duty_cycle)
71{
72 struct ath10k *ar = cdev->devdata;
73 u32 period, duration, enabled;
74 int num_bss, ret = 0;
75
76 mutex_lock(&ar->conf_mutex);
77 if (ar->state != ATH10K_STATE_ON) {
78 ret = -ENETDOWN;
79 goto out;
80 }
81
82 if (duty_cycle > ATH10K_QUIET_DUTY_CYCLE_MAX) {
83 ath10k_warn(ar, "duty cycle %ld is exceeding the limit %d\n",
84 duty_cycle, ATH10K_QUIET_DUTY_CYCLE_MAX);
85 ret = -EINVAL;
86 goto out;
87 }
88 /* TODO: Right now, thermal mitigation is handled only for single/multi
89 * vif AP mode. Since quiet param is not validated in STA mode, it needs
90 * to be investigated further to handle multi STA and multi-vif (AP+STA)
91 * mode properly.
92 */
93 num_bss = ath10k_thermal_get_active_vifs(ar, WMI_VDEV_TYPE_AP);
94 if (!num_bss) {
95 ath10k_warn(ar, "no active AP interfaces\n");
96 ret = -ENETDOWN;
97 goto out;
98 }
99 period = max(ATH10K_QUIET_PERIOD_MIN,
100 (ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
101 duration = (period * duty_cycle) / 100;
102 enabled = duration ? 1 : 0;
103
104 ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
105 ATH10K_QUIET_START_OFFSET,
106 enabled);
107 if (ret) {
108 ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
109 period, duration, enabled, ret);
110 goto out;
111 }
112 ar->thermal.duty_cycle = duty_cycle;
113out:
114 mutex_unlock(&ar->conf_mutex);
115 return ret;
116}
117
118static struct thermal_cooling_device_ops ath10k_thermal_ops = {
119 .get_max_state = ath10k_thermal_get_max_dutycycle,
120 .get_cur_state = ath10k_thermal_get_cur_dutycycle,
121 .set_cur_state = ath10k_thermal_set_cur_dutycycle,
122};
123
124static ssize_t ath10k_thermal_show_temp(struct device *dev,
125 struct device_attribute *attr,
126 char *buf)
127{
128 struct ath10k *ar = dev_get_drvdata(dev);
129 int ret, temperature;
130
131 mutex_lock(&ar->conf_mutex);
132
133 /* Can't get temperature when the card is off */
134 if (ar->state != ATH10K_STATE_ON) {
135 ret = -ENETDOWN;
136 goto out;
137 }
138
139 reinit_completion(&ar->thermal.wmi_sync);
140 ret = ath10k_wmi_pdev_get_temperature(ar);
141 if (ret) {
142 ath10k_warn(ar, "failed to read temperature %d\n", ret);
143 goto out;
144 }
145
146 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
147 ret = -ESHUTDOWN;
148 goto out;
149 }
150
151 ret = wait_for_completion_timeout(&ar->thermal.wmi_sync,
152 ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
153 if (ret == 0) {
154 ath10k_warn(ar, "failed to synchronize thermal read\n");
155 ret = -ETIMEDOUT;
156 goto out;
157 }
158
159 spin_lock_bh(&ar->data_lock);
160 temperature = ar->thermal.temperature;
161 spin_unlock_bh(&ar->data_lock);
162
163 /* display in millidegree celcius */
164 ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
165out:
166 mutex_unlock(&ar->conf_mutex);
167 return ret;
168}
169
170void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature)
171{
172 spin_lock_bh(&ar->data_lock);
173 ar->thermal.temperature = temperature;
174 spin_unlock_bh(&ar->data_lock);
175 complete(&ar->thermal.wmi_sync);
176}
177
178static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ath10k_thermal_show_temp,
179 NULL, 0);
180
181static struct attribute *ath10k_hwmon_attrs[] = {
182 &sensor_dev_attr_temp1_input.dev_attr.attr,
183 NULL,
184};
185ATTRIBUTE_GROUPS(ath10k_hwmon);
186
187int ath10k_thermal_register(struct ath10k *ar)
188{
189 struct thermal_cooling_device *cdev;
190 struct device *hwmon_dev;
191 int ret;
192
193 cdev = thermal_cooling_device_register("ath10k_thermal", ar,
194 &ath10k_thermal_ops);
195
196 if (IS_ERR(cdev)) {
197 ath10k_err(ar, "failed to setup thermal device result: %ld\n",
198 PTR_ERR(cdev));
199 return -EINVAL;
200 }
201
202 ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
203 "cooling_device");
204 if (ret) {
205 ath10k_err(ar, "failed to create thermal symlink\n");
206 goto err_cooling_destroy;
207 }
208
209 ar->thermal.cdev = cdev;
210
211 /* Do not register hwmon device when temperature reading is not
212 * supported by firmware
213 */
214 if (ar->wmi.op_version != ATH10K_FW_WMI_OP_VERSION_10_2_4)
215 return 0;
216
217 /* Avoid linking error on devm_hwmon_device_register_with_groups, I
218 * guess linux/hwmon.h is missing proper stubs. */
219 if (!config_enabled(CONFIG_HWMON))
220 return 0;
221
222 hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
223 "ath10k_hwmon", ar,
224 ath10k_hwmon_groups);
225 if (IS_ERR(hwmon_dev)) {
226 ath10k_err(ar, "failed to register hwmon device: %ld\n",
227 PTR_ERR(hwmon_dev));
228 ret = -EINVAL;
229 goto err_remove_link;
230 }
231 return 0;
232
233err_remove_link:
234 sysfs_remove_link(&ar->dev->kobj, "thermal_sensor");
235err_cooling_destroy:
236 thermal_cooling_device_unregister(cdev);
237 return ret;
238}
239
240void ath10k_thermal_unregister(struct ath10k *ar)
241{
242 thermal_cooling_device_unregister(ar->thermal.cdev);
243 sysfs_remove_link(&ar->dev->kobj, "cooling_device");
244}
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
new file mode 100644
index 000000000000..bccc17ae0fde
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright (c) 2014 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#ifndef _THERMAL_
17#define _THERMAL_
18
19#define ATH10K_QUIET_PERIOD_DEFAULT 100
20#define ATH10K_QUIET_PERIOD_MIN 25
21#define ATH10K_QUIET_START_OFFSET 10
22#define ATH10K_QUIET_DUTY_CYCLE_MAX 70
23#define ATH10K_HWMON_NAME_LEN 15
24#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
25
26struct ath10k_thermal {
27 struct thermal_cooling_device *cdev;
28 struct completion wmi_sync;
29
30 /* protected by conf_mutex */
31 u32 duty_cycle;
32 /* temperature value in Celcius degree
33 * protected by data_lock
34 */
35 int temperature;
36};
37
38#ifdef CONFIG_THERMAL
39int ath10k_thermal_register(struct ath10k *ar);
40void ath10k_thermal_unregister(struct ath10k *ar);
41void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
42#else
43static inline int ath10k_thermal_register(struct ath10k *ar)
44{
45 return 0;
46}
47
48static inline void ath10k_thermal_unregister(struct ath10k *ar)
49{
50}
51
52static inline void ath10k_thermal_event_temperature(struct ath10k *ar,
53 int temperature)
54{
55}
56
57#endif
58#endif /* _THERMAL_ */
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index b289378b6e3e..5407887380ab 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -453,6 +453,74 @@ TRACE_EVENT(ath10k_htt_rx_desc,
453 ) 453 )
454); 454);
455 455
456TRACE_EVENT(ath10k_wmi_diag_container,
457 TP_PROTO(struct ath10k *ar,
458 u8 type,
459 u32 timestamp,
460 u32 code,
461 u16 len,
462 const void *data),
463
464 TP_ARGS(ar, type, timestamp, code, len, data),
465
466 TP_STRUCT__entry(
467 __string(device, dev_name(ar->dev))
468 __string(driver, dev_driver_string(ar->dev))
469 __field(u8, type)
470 __field(u32, timestamp)
471 __field(u32, code)
472 __field(u16, len)
473 __dynamic_array(u8, data, len)
474 ),
475
476 TP_fast_assign(
477 __assign_str(device, dev_name(ar->dev));
478 __assign_str(driver, dev_driver_string(ar->dev));
479 __entry->type = type;
480 __entry->timestamp = timestamp;
481 __entry->code = code;
482 __entry->len = len;
483 memcpy(__get_dynamic_array(data), data, len);
484 ),
485
486 TP_printk(
487 "%s %s diag container type %hhu timestamp %u code %u len %d",
488 __get_str(driver),
489 __get_str(device),
490 __entry->type,
491 __entry->timestamp,
492 __entry->code,
493 __entry->len
494 )
495);
496
497TRACE_EVENT(ath10k_wmi_diag,
498 TP_PROTO(struct ath10k *ar, const void *data, size_t len),
499
500 TP_ARGS(ar, data, len),
501
502 TP_STRUCT__entry(
503 __string(device, dev_name(ar->dev))
504 __string(driver, dev_driver_string(ar->dev))
505 __field(u16, len)
506 __dynamic_array(u8, data, len)
507 ),
508
509 TP_fast_assign(
510 __assign_str(device, dev_name(ar->dev));
511 __assign_str(driver, dev_driver_string(ar->dev));
512 __entry->len = len;
513 memcpy(__get_dynamic_array(data), data, len);
514 ),
515
516 TP_printk(
517 "%s %s tlv diag len %d",
518 __get_str(driver),
519 __get_str(device),
520 __entry->len
521 )
522);
523
456#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ 524#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
457 525
458/* we don't want to use include/trace/events */ 526/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 7579de8e7a8c..3f00cec8aef5 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -64,7 +64,13 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
64 return; 64 return;
65 } 65 }
66 66
67 msdu = htt->pending_tx[tx_done->msdu_id]; 67 msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
68 if (!msdu) {
69 ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
70 tx_done->msdu_id);
71 return;
72 }
73
68 skb_cb = ATH10K_SKB_CB(msdu); 74 skb_cb = ATH10K_SKB_CB(msdu);
69 75
70 dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); 76 dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
@@ -95,7 +101,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
95 /* we do not own the msdu anymore */ 101 /* we do not own the msdu anymore */
96 102
97exit: 103exit:
98 htt->pending_tx[tx_done->msdu_id] = NULL;
99 ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); 104 ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
100 __ath10k_htt_tx_dec_pending(htt); 105 __ath10k_htt_tx_dec_pending(htt);
101 if (htt->num_pending_tx == 0) 106 if (htt->num_pending_tx == 0)
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
new file mode 100644
index 000000000000..04dc4b9db04e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -0,0 +1,1064 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _WMI_OPS_H_
19#define _WMI_OPS_H_
20
21struct ath10k;
22struct sk_buff;
23
24struct wmi_ops {
25 void (*rx)(struct ath10k *ar, struct sk_buff *skb);
26 void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
27
28 int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
29 struct wmi_scan_ev_arg *arg);
30 int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
31 struct wmi_mgmt_rx_ev_arg *arg);
32 int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
33 struct wmi_ch_info_ev_arg *arg);
34 int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
35 struct wmi_vdev_start_ev_arg *arg);
36 int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
37 struct wmi_peer_kick_ev_arg *arg);
38 int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
39 struct wmi_swba_ev_arg *arg);
40 int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
41 struct wmi_phyerr_ev_arg *arg);
42 int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
43 struct wmi_svc_rdy_ev_arg *arg);
44 int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
45 struct wmi_rdy_ev_arg *arg);
46 int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
47 struct ath10k_fw_stats *stats);
48
49 struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
50 struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
51 struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
52 u16 rd5g, u16 ctl2g, u16 ctl5g,
53 enum wmi_dfs_region dfs_reg);
54 struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
55 u32 value);
56 struct sk_buff *(*gen_init)(struct ath10k *ar);
57 struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
58 const struct wmi_start_scan_arg *arg);
59 struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
60 const struct wmi_stop_scan_arg *arg);
61 struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
62 enum wmi_vdev_type type,
63 enum wmi_vdev_subtype subtype,
64 const u8 macaddr[ETH_ALEN]);
65 struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
66 struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
67 const struct wmi_vdev_start_request_arg *arg,
68 bool restart);
69 struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
70 struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
71 const u8 *bssid);
72 struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
73 struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
74 u32 param_id, u32 param_value);
75 struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
76 const struct wmi_vdev_install_key_arg *arg);
77 struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
78 const struct wmi_vdev_spectral_conf_arg *arg);
79 struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
80 u32 trigger, u32 enable);
81 struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
82 const struct wmi_wmm_params_all_arg *arg);
83 struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
84 const u8 peer_addr[ETH_ALEN]);
85 struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
86 const u8 peer_addr[ETH_ALEN]);
87 struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
88 const u8 peer_addr[ETH_ALEN],
89 u32 tid_bitmap);
90 struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
91 const u8 *peer_addr,
92 enum wmi_peer_param param_id,
93 u32 param_value);
94 struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
95 const struct wmi_peer_assoc_complete_arg *arg);
96 struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
97 enum wmi_sta_ps_mode psmode);
98 struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
99 enum wmi_sta_powersave_param param_id,
100 u32 value);
101 struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
102 const u8 *mac,
103 enum wmi_ap_ps_peer_param param_id,
104 u32 value);
105 struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
106 const struct wmi_scan_chan_list_arg *arg);
107 struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
108 const void *bcn, size_t bcn_len,
109 u32 bcn_paddr, bool dtim_zero,
110 bool deliver_cab);
111 struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
112 const struct wmi_wmm_params_all_arg *arg);
113 struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
114 enum wmi_stats_id stats_id);
115 struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
116 enum wmi_force_fw_hang_type type,
117 u32 delay_ms);
118 struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
119 struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
120 u32 log_level);
121 struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
122 struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
123 struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
124 u32 period, u32 duration,
125 u32 next_offset,
126 u32 enabled);
127 struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
128 struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
129 const u8 *mac);
130 struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
131 const u8 *mac, u32 tid, u32 buf_size);
132 struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
133 const u8 *mac, u32 tid,
134 u32 status);
135 struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
136 const u8 *mac, u32 tid, u32 initiator,
137 u32 reason);
138 struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
139 u32 tim_ie_offset, struct sk_buff *bcn,
140 u32 prb_caps, u32 prb_erp,
141 void *prb_ies, size_t prb_ies_len);
142 struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
143 struct sk_buff *bcn);
144 struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
145 const u8 *p2p_ie);
146 struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
147 const u8 peer_addr[ETH_ALEN],
148 const struct wmi_sta_uapsd_auto_trig_arg *args,
149 u32 num_ac);
150 struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
151 const struct wmi_sta_keepalive_arg *arg);
152};
153
154int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
155
156static inline int
157ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
158{
159 if (WARN_ON_ONCE(!ar->wmi.ops->rx))
160 return -EOPNOTSUPP;
161
162 ar->wmi.ops->rx(ar, skb);
163 return 0;
164}
165
166static inline int
167ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
168 size_t len)
169{
170 if (!ar->wmi.ops->map_svc)
171 return -EOPNOTSUPP;
172
173 ar->wmi.ops->map_svc(in, out, len);
174 return 0;
175}
176
177static inline int
178ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
179 struct wmi_scan_ev_arg *arg)
180{
181 if (!ar->wmi.ops->pull_scan)
182 return -EOPNOTSUPP;
183
184 return ar->wmi.ops->pull_scan(ar, skb, arg);
185}
186
187static inline int
188ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
189 struct wmi_mgmt_rx_ev_arg *arg)
190{
191 if (!ar->wmi.ops->pull_mgmt_rx)
192 return -EOPNOTSUPP;
193
194 return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
195}
196
197static inline int
198ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
199 struct wmi_ch_info_ev_arg *arg)
200{
201 if (!ar->wmi.ops->pull_ch_info)
202 return -EOPNOTSUPP;
203
204 return ar->wmi.ops->pull_ch_info(ar, skb, arg);
205}
206
207static inline int
208ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
209 struct wmi_vdev_start_ev_arg *arg)
210{
211 if (!ar->wmi.ops->pull_vdev_start)
212 return -EOPNOTSUPP;
213
214 return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
215}
216
217static inline int
218ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
219 struct wmi_peer_kick_ev_arg *arg)
220{
221 if (!ar->wmi.ops->pull_peer_kick)
222 return -EOPNOTSUPP;
223
224 return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
225}
226
227static inline int
228ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
229 struct wmi_swba_ev_arg *arg)
230{
231 if (!ar->wmi.ops->pull_swba)
232 return -EOPNOTSUPP;
233
234 return ar->wmi.ops->pull_swba(ar, skb, arg);
235}
236
237static inline int
238ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
239 struct wmi_phyerr_ev_arg *arg)
240{
241 if (!ar->wmi.ops->pull_phyerr)
242 return -EOPNOTSUPP;
243
244 return ar->wmi.ops->pull_phyerr(ar, skb, arg);
245}
246
247static inline int
248ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
249 struct wmi_svc_rdy_ev_arg *arg)
250{
251 if (!ar->wmi.ops->pull_svc_rdy)
252 return -EOPNOTSUPP;
253
254 return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
255}
256
257static inline int
258ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
259 struct wmi_rdy_ev_arg *arg)
260{
261 if (!ar->wmi.ops->pull_rdy)
262 return -EOPNOTSUPP;
263
264 return ar->wmi.ops->pull_rdy(ar, skb, arg);
265}
266
267static inline int
268ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
269 struct ath10k_fw_stats *stats)
270{
271 if (!ar->wmi.ops->pull_fw_stats)
272 return -EOPNOTSUPP;
273
274 return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
275}
276
277static inline int
278ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
279{
280 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
281 struct sk_buff *skb;
282 int ret;
283
284 if (!ar->wmi.ops->gen_mgmt_tx)
285 return -EOPNOTSUPP;
286
287 skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
288 if (IS_ERR(skb))
289 return PTR_ERR(skb);
290
291 ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
292 if (ret)
293 return ret;
294
295 /* FIXME There's no ACK event for Management Tx. This probably
296 * shouldn't be called here either. */
297 info->flags |= IEEE80211_TX_STAT_ACK;
298 ieee80211_tx_status_irqsafe(ar->hw, msdu);
299
300 return 0;
301}
302
303static inline int
304ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
305 u16 ctl2g, u16 ctl5g,
306 enum wmi_dfs_region dfs_reg)
307{
308 struct sk_buff *skb;
309
310 if (!ar->wmi.ops->gen_pdev_set_rd)
311 return -EOPNOTSUPP;
312
313 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
314 dfs_reg);
315 if (IS_ERR(skb))
316 return PTR_ERR(skb);
317
318 return ath10k_wmi_cmd_send(ar, skb,
319 ar->wmi.cmd->pdev_set_regdomain_cmdid);
320}
321
322static inline int
323ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
324{
325 struct sk_buff *skb;
326
327 if (!ar->wmi.ops->gen_pdev_suspend)
328 return -EOPNOTSUPP;
329
330 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
331 if (IS_ERR(skb))
332 return PTR_ERR(skb);
333
334 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
335}
336
337static inline int
338ath10k_wmi_pdev_resume_target(struct ath10k *ar)
339{
340 struct sk_buff *skb;
341
342 if (!ar->wmi.ops->gen_pdev_resume)
343 return -EOPNOTSUPP;
344
345 skb = ar->wmi.ops->gen_pdev_resume(ar);
346 if (IS_ERR(skb))
347 return PTR_ERR(skb);
348
349 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
350}
351
352static inline int
353ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
354{
355 struct sk_buff *skb;
356
357 if (!ar->wmi.ops->gen_pdev_set_param)
358 return -EOPNOTSUPP;
359
360 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
361 if (IS_ERR(skb))
362 return PTR_ERR(skb);
363
364 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
365}
366
367static inline int
368ath10k_wmi_cmd_init(struct ath10k *ar)
369{
370 struct sk_buff *skb;
371
372 if (!ar->wmi.ops->gen_init)
373 return -EOPNOTSUPP;
374
375 skb = ar->wmi.ops->gen_init(ar);
376 if (IS_ERR(skb))
377 return PTR_ERR(skb);
378
379 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
380}
381
382static inline int
383ath10k_wmi_start_scan(struct ath10k *ar,
384 const struct wmi_start_scan_arg *arg)
385{
386 struct sk_buff *skb;
387
388 if (!ar->wmi.ops->gen_start_scan)
389 return -EOPNOTSUPP;
390
391 skb = ar->wmi.ops->gen_start_scan(ar, arg);
392 if (IS_ERR(skb))
393 return PTR_ERR(skb);
394
395 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
396}
397
398static inline int
399ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
400{
401 struct sk_buff *skb;
402
403 if (!ar->wmi.ops->gen_stop_scan)
404 return -EOPNOTSUPP;
405
406 skb = ar->wmi.ops->gen_stop_scan(ar, arg);
407 if (IS_ERR(skb))
408 return PTR_ERR(skb);
409
410 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
411}
412
413static inline int
414ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
415 enum wmi_vdev_type type,
416 enum wmi_vdev_subtype subtype,
417 const u8 macaddr[ETH_ALEN])
418{
419 struct sk_buff *skb;
420
421 if (!ar->wmi.ops->gen_vdev_create)
422 return -EOPNOTSUPP;
423
424 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
425 if (IS_ERR(skb))
426 return PTR_ERR(skb);
427
428 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
429}
430
431static inline int
432ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
433{
434 struct sk_buff *skb;
435
436 if (!ar->wmi.ops->gen_vdev_delete)
437 return -EOPNOTSUPP;
438
439 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
440 if (IS_ERR(skb))
441 return PTR_ERR(skb);
442
443 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
444}
445
446static inline int
447ath10k_wmi_vdev_start(struct ath10k *ar,
448 const struct wmi_vdev_start_request_arg *arg)
449{
450 struct sk_buff *skb;
451
452 if (!ar->wmi.ops->gen_vdev_start)
453 return -EOPNOTSUPP;
454
455 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
456 if (IS_ERR(skb))
457 return PTR_ERR(skb);
458
459 return ath10k_wmi_cmd_send(ar, skb,
460 ar->wmi.cmd->vdev_start_request_cmdid);
461}
462
463static inline int
464ath10k_wmi_vdev_restart(struct ath10k *ar,
465 const struct wmi_vdev_start_request_arg *arg)
466{
467 struct sk_buff *skb;
468
469 if (!ar->wmi.ops->gen_vdev_start)
470 return -EOPNOTSUPP;
471
472 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
473 if (IS_ERR(skb))
474 return PTR_ERR(skb);
475
476 return ath10k_wmi_cmd_send(ar, skb,
477 ar->wmi.cmd->vdev_restart_request_cmdid);
478}
479
480static inline int
481ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
482{
483 struct sk_buff *skb;
484
485 if (!ar->wmi.ops->gen_vdev_stop)
486 return -EOPNOTSUPP;
487
488 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
489 if (IS_ERR(skb))
490 return PTR_ERR(skb);
491
492 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
493}
494
495static inline int
496ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
497{
498 struct sk_buff *skb;
499
500 if (!ar->wmi.ops->gen_vdev_up)
501 return -EOPNOTSUPP;
502
503 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
504 if (IS_ERR(skb))
505 return PTR_ERR(skb);
506
507 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
508}
509
510static inline int
511ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
512{
513 struct sk_buff *skb;
514
515 if (!ar->wmi.ops->gen_vdev_down)
516 return -EOPNOTSUPP;
517
518 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
519 if (IS_ERR(skb))
520 return PTR_ERR(skb);
521
522 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
523}
524
525static inline int
526ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
527 u32 param_value)
528{
529 struct sk_buff *skb;
530
531 if (!ar->wmi.ops->gen_vdev_set_param)
532 return -EOPNOTSUPP;
533
534 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
535 param_value);
536 if (IS_ERR(skb))
537 return PTR_ERR(skb);
538
539 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
540}
541
542static inline int
543ath10k_wmi_vdev_install_key(struct ath10k *ar,
544 const struct wmi_vdev_install_key_arg *arg)
545{
546 struct sk_buff *skb;
547
548 if (!ar->wmi.ops->gen_vdev_install_key)
549 return -EOPNOTSUPP;
550
551 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
552 if (IS_ERR(skb))
553 return PTR_ERR(skb);
554
555 return ath10k_wmi_cmd_send(ar, skb,
556 ar->wmi.cmd->vdev_install_key_cmdid);
557}
558
559static inline int
560ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
561 const struct wmi_vdev_spectral_conf_arg *arg)
562{
563 struct sk_buff *skb;
564 u32 cmd_id;
565
566 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
567 if (IS_ERR(skb))
568 return PTR_ERR(skb);
569
570 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
571 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
572}
573
574static inline int
575ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
576 u32 enable)
577{
578 struct sk_buff *skb;
579 u32 cmd_id;
580
581 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
582 enable);
583 if (IS_ERR(skb))
584 return PTR_ERR(skb);
585
586 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
587 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
588}
589
590static inline int
591ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
592 const u8 peer_addr[ETH_ALEN],
593 const struct wmi_sta_uapsd_auto_trig_arg *args,
594 u32 num_ac)
595{
596 struct sk_buff *skb;
597 u32 cmd_id;
598
599 if (!ar->wmi.ops->gen_vdev_sta_uapsd)
600 return -EOPNOTSUPP;
601
602 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
603 num_ac);
604 if (IS_ERR(skb))
605 return PTR_ERR(skb);
606
607 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
608 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
609}
610
611static inline int
612ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
613 const struct wmi_wmm_params_all_arg *arg)
614{
615 struct sk_buff *skb;
616 u32 cmd_id;
617
618 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
619 if (IS_ERR(skb))
620 return PTR_ERR(skb);
621
622 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
623 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
624}
625
626static inline int
627ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
628 const u8 peer_addr[ETH_ALEN])
629{
630 struct sk_buff *skb;
631
632 if (!ar->wmi.ops->gen_peer_create)
633 return -EOPNOTSUPP;
634
635 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
636 if (IS_ERR(skb))
637 return PTR_ERR(skb);
638
639 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
640}
641
642static inline int
643ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
644 const u8 peer_addr[ETH_ALEN])
645{
646 struct sk_buff *skb;
647
648 if (!ar->wmi.ops->gen_peer_delete)
649 return -EOPNOTSUPP;
650
651 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
652 if (IS_ERR(skb))
653 return PTR_ERR(skb);
654
655 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
656}
657
658static inline int
659ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
660 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
661{
662 struct sk_buff *skb;
663
664 if (!ar->wmi.ops->gen_peer_flush)
665 return -EOPNOTSUPP;
666
667 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
668 if (IS_ERR(skb))
669 return PTR_ERR(skb);
670
671 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
672}
673
674static inline int
675ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
676 enum wmi_peer_param param_id, u32 param_value)
677{
678 struct sk_buff *skb;
679
680 if (!ar->wmi.ops->gen_peer_set_param)
681 return -EOPNOTSUPP;
682
683 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
684 param_value);
685 if (IS_ERR(skb))
686 return PTR_ERR(skb);
687
688 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
689}
690
691static inline int
692ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
693 enum wmi_sta_ps_mode psmode)
694{
695 struct sk_buff *skb;
696
697 if (!ar->wmi.ops->gen_set_psmode)
698 return -EOPNOTSUPP;
699
700 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
701 if (IS_ERR(skb))
702 return PTR_ERR(skb);
703
704 return ath10k_wmi_cmd_send(ar, skb,
705 ar->wmi.cmd->sta_powersave_mode_cmdid);
706}
707
708static inline int
709ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
710 enum wmi_sta_powersave_param param_id, u32 value)
711{
712 struct sk_buff *skb;
713
714 if (!ar->wmi.ops->gen_set_sta_ps)
715 return -EOPNOTSUPP;
716
717 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
718 if (IS_ERR(skb))
719 return PTR_ERR(skb);
720
721 return ath10k_wmi_cmd_send(ar, skb,
722 ar->wmi.cmd->sta_powersave_param_cmdid);
723}
724
725static inline int
726ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
727 enum wmi_ap_ps_peer_param param_id, u32 value)
728{
729 struct sk_buff *skb;
730
731 if (!ar->wmi.ops->gen_set_ap_ps)
732 return -EOPNOTSUPP;
733
734 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
735 if (IS_ERR(skb))
736 return PTR_ERR(skb);
737
738 return ath10k_wmi_cmd_send(ar, skb,
739 ar->wmi.cmd->ap_ps_peer_param_cmdid);
740}
741
742static inline int
743ath10k_wmi_scan_chan_list(struct ath10k *ar,
744 const struct wmi_scan_chan_list_arg *arg)
745{
746 struct sk_buff *skb;
747
748 if (!ar->wmi.ops->gen_scan_chan_list)
749 return -EOPNOTSUPP;
750
751 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
752 if (IS_ERR(skb))
753 return PTR_ERR(skb);
754
755 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
756}
757
758static inline int
759ath10k_wmi_peer_assoc(struct ath10k *ar,
760 const struct wmi_peer_assoc_complete_arg *arg)
761{
762 struct sk_buff *skb;
763
764 if (!ar->wmi.ops->gen_peer_assoc)
765 return -EOPNOTSUPP;
766
767 skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
768 if (IS_ERR(skb))
769 return PTR_ERR(skb);
770
771 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
772}
773
774static inline int
775ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
776 const void *bcn, size_t bcn_len,
777 u32 bcn_paddr, bool dtim_zero,
778 bool deliver_cab)
779{
780 struct sk_buff *skb;
781 int ret;
782
783 if (!ar->wmi.ops->gen_beacon_dma)
784 return -EOPNOTSUPP;
785
786 skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
787 dtim_zero, deliver_cab);
788 if (IS_ERR(skb))
789 return PTR_ERR(skb);
790
791 ret = ath10k_wmi_cmd_send_nowait(ar, skb,
792 ar->wmi.cmd->pdev_send_bcn_cmdid);
793 if (ret) {
794 dev_kfree_skb(skb);
795 return ret;
796 }
797
798 return 0;
799}
800
801static inline int
802ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
803 const struct wmi_wmm_params_all_arg *arg)
804{
805 struct sk_buff *skb;
806
807 if (!ar->wmi.ops->gen_pdev_set_wmm)
808 return -EOPNOTSUPP;
809
810 skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
811 if (IS_ERR(skb))
812 return PTR_ERR(skb);
813
814 return ath10k_wmi_cmd_send(ar, skb,
815 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
816}
817
818static inline int
819ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
820{
821 struct sk_buff *skb;
822
823 if (!ar->wmi.ops->gen_request_stats)
824 return -EOPNOTSUPP;
825
826 skb = ar->wmi.ops->gen_request_stats(ar, stats_id);
827 if (IS_ERR(skb))
828 return PTR_ERR(skb);
829
830 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
831}
832
833static inline int
834ath10k_wmi_force_fw_hang(struct ath10k *ar,
835 enum wmi_force_fw_hang_type type, u32 delay_ms)
836{
837 struct sk_buff *skb;
838
839 if (!ar->wmi.ops->gen_force_fw_hang)
840 return -EOPNOTSUPP;
841
842 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
843 if (IS_ERR(skb))
844 return PTR_ERR(skb);
845
846 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
847}
848
849static inline int
850ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
851{
852 struct sk_buff *skb;
853
854 if (!ar->wmi.ops->gen_dbglog_cfg)
855 return -EOPNOTSUPP;
856
857 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
858 if (IS_ERR(skb))
859 return PTR_ERR(skb);
860
861 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
862}
863
864static inline int
865ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
866{
867 struct sk_buff *skb;
868
869 if (!ar->wmi.ops->gen_pktlog_enable)
870 return -EOPNOTSUPP;
871
872 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
873 if (IS_ERR(skb))
874 return PTR_ERR(skb);
875
876 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
877}
878
879static inline int
880ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
881{
882 struct sk_buff *skb;
883
884 if (!ar->wmi.ops->gen_pktlog_disable)
885 return -EOPNOTSUPP;
886
887 skb = ar->wmi.ops->gen_pktlog_disable(ar);
888 if (IS_ERR(skb))
889 return PTR_ERR(skb);
890
891 return ath10k_wmi_cmd_send(ar, skb,
892 ar->wmi.cmd->pdev_pktlog_disable_cmdid);
893}
894
895static inline int
896ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
897 u32 next_offset, u32 enabled)
898{
899 struct sk_buff *skb;
900
901 if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
902 return -EOPNOTSUPP;
903
904 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
905 next_offset, enabled);
906 if (IS_ERR(skb))
907 return PTR_ERR(skb);
908
909 return ath10k_wmi_cmd_send(ar, skb,
910 ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
911}
912
913static inline int
914ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
915{
916 struct sk_buff *skb;
917
918 if (!ar->wmi.ops->gen_pdev_get_temperature)
919 return -EOPNOTSUPP;
920
921 skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
922 if (IS_ERR(skb))
923 return PTR_ERR(skb);
924
925 return ath10k_wmi_cmd_send(ar, skb,
926 ar->wmi.cmd->pdev_get_temperature_cmdid);
927}
928
929static inline int
930ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
931{
932 struct sk_buff *skb;
933
934 if (!ar->wmi.ops->gen_addba_clear_resp)
935 return -EOPNOTSUPP;
936
937 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
938 if (IS_ERR(skb))
939 return PTR_ERR(skb);
940
941 return ath10k_wmi_cmd_send(ar, skb,
942 ar->wmi.cmd->addba_clear_resp_cmdid);
943}
944
945static inline int
946ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
947 u32 tid, u32 buf_size)
948{
949 struct sk_buff *skb;
950
951 if (!ar->wmi.ops->gen_addba_send)
952 return -EOPNOTSUPP;
953
954 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
955 if (IS_ERR(skb))
956 return PTR_ERR(skb);
957
958 return ath10k_wmi_cmd_send(ar, skb,
959 ar->wmi.cmd->addba_send_cmdid);
960}
961
962static inline int
963ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
964 u32 tid, u32 status)
965{
966 struct sk_buff *skb;
967
968 if (!ar->wmi.ops->gen_addba_set_resp)
969 return -EOPNOTSUPP;
970
971 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
972 if (IS_ERR(skb))
973 return PTR_ERR(skb);
974
975 return ath10k_wmi_cmd_send(ar, skb,
976 ar->wmi.cmd->addba_set_resp_cmdid);
977}
978
979static inline int
980ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
981 u32 tid, u32 initiator, u32 reason)
982{
983 struct sk_buff *skb;
984
985 if (!ar->wmi.ops->gen_delba_send)
986 return -EOPNOTSUPP;
987
988 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
989 reason);
990 if (IS_ERR(skb))
991 return PTR_ERR(skb);
992
993 return ath10k_wmi_cmd_send(ar, skb,
994 ar->wmi.cmd->delba_send_cmdid);
995}
996
997static inline int
998ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
999 struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
1000 void *prb_ies, size_t prb_ies_len)
1001{
1002 struct sk_buff *skb;
1003
1004 if (!ar->wmi.ops->gen_bcn_tmpl)
1005 return -EOPNOTSUPP;
1006
1007 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1008 prb_caps, prb_erp, prb_ies,
1009 prb_ies_len);
1010 if (IS_ERR(skb))
1011 return PTR_ERR(skb);
1012
1013 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1014}
1015
1016static inline int
1017ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1018{
1019 struct sk_buff *skb;
1020
1021 if (!ar->wmi.ops->gen_prb_tmpl)
1022 return -EOPNOTSUPP;
1023
1024 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1025 if (IS_ERR(skb))
1026 return PTR_ERR(skb);
1027
1028 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1029}
1030
1031static inline int
1032ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1033{
1034 struct sk_buff *skb;
1035
1036 if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1037 return -EOPNOTSUPP;
1038
1039 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1040 if (IS_ERR(skb))
1041 return PTR_ERR(skb);
1042
1043 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1044}
1045
1046static inline int
1047ath10k_wmi_sta_keepalive(struct ath10k *ar,
1048 const struct wmi_sta_keepalive_arg *arg)
1049{
1050 struct sk_buff *skb;
1051 u32 cmd_id;
1052
1053 if (!ar->wmi.ops->gen_sta_keepalive)
1054 return -EOPNOTSUPP;
1055
1056 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1057 if (IS_ERR(skb))
1058 return PTR_ERR(skb);
1059
1060 cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1061 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1062}
1063
1064#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
new file mode 100644
index 000000000000..71614ba1b145
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -0,0 +1,2696 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17#include "core.h"
18#include "debug.h"
19#include "hw.h"
20#include "wmi.h"
21#include "wmi-ops.h"
22#include "wmi-tlv.h"
23
/***************/
/* TLV helpers */
/***************/
27
/* Per-tag validation policy: the minimum payload length a TLV with a
 * given tag must carry to be considered well-formed by the iterator.
 */
struct wmi_tlv_policy {
	size_t min_len;
};
31
/* Minimum-length table indexed by TLV tag. Tags not listed here (or with
 * min_len == 0) are accepted with any length by ath10k_wmi_tlv_iter().
 */
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
	[WMI_TLV_TAG_ARRAY_BYTE]
		= { .min_len = sizeof(u8) },
	[WMI_TLV_TAG_ARRAY_UINT32]
		= { .min_len = sizeof(u32) },
	[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
		= { .min_len = sizeof(struct wmi_scan_event) },
	[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
		= { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
	[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_start_response_event) },
	[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
		= { .min_len = sizeof(struct wmi_host_swba_event) },
	[WMI_TLV_TAG_STRUCT_TIM_INFO]
		= { .min_len = sizeof(struct wmi_tim_info) },
	[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
		= { .min_len = sizeof(struct wmi_p2p_noa_info) },
	[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
	[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
		= { .min_len = sizeof(struct hal_reg_capabilities) },
	[WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
		= { .min_len = sizeof(struct wlan_host_mem_req) },
	[WMI_TLV_TAG_STRUCT_READY_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
	[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
};
66
67static int
68ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
69 int (*iter)(struct ath10k *ar, u16 tag, u16 len,
70 const void *ptr, void *data),
71 void *data)
72{
73 const void *begin = ptr;
74 const struct wmi_tlv *tlv;
75 u16 tlv_tag, tlv_len;
76 int ret;
77
78 while (len > 0) {
79 if (len < sizeof(*tlv)) {
80 ath10k_dbg(ar, ATH10K_DBG_WMI,
81 "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
82 ptr - begin, len, sizeof(*tlv));
83 return -EINVAL;
84 }
85
86 tlv = ptr;
87 tlv_tag = __le16_to_cpu(tlv->tag);
88 tlv_len = __le16_to_cpu(tlv->len);
89 ptr += sizeof(*tlv);
90 len -= sizeof(*tlv);
91
92 if (tlv_len > len) {
93 ath10k_dbg(ar, ATH10K_DBG_WMI,
94 "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
95 tlv_tag, ptr - begin, len, tlv_len);
96 return -EINVAL;
97 }
98
99 if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
100 wmi_tlv_policies[tlv_tag].min_len &&
101 wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
102 ath10k_dbg(ar, ATH10K_DBG_WMI,
103 "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
104 tlv_tag, ptr - begin, tlv_len,
105 wmi_tlv_policies[tlv_tag].min_len);
106 return -EINVAL;
107 }
108
109 ret = iter(ar, tlv_tag, tlv_len, ptr, data);
110 if (ret)
111 return ret;
112
113 ptr += tlv_len;
114 len -= tlv_len;
115 }
116
117 return 0;
118}
119
120static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
121 const void *ptr, void *data)
122{
123 const void **tb = data;
124
125 if (tag < WMI_TLV_TAG_MAX)
126 tb[tag] = ptr;
127
128 return 0;
129}
130
131static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
132 const void *ptr, size_t len)
133{
134 return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
135 (void *)tb);
136}
137
138static const void **
139ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
140 size_t len, gfp_t gfp)
141{
142 const void **tb;
143 int ret;
144
145 tb = kzalloc(sizeof(*tb) * WMI_TLV_TAG_MAX, gfp);
146 if (!tb)
147 return ERR_PTR(-ENOMEM);
148
149 ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
150 if (ret) {
151 kfree(tb);
152 return ERR_PTR(ret);
153 }
154
155 return tb;
156}
157
158static u16 ath10k_wmi_tlv_len(const void *ptr)
159{
160 return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
161}
162
163/**************/
164/* TLV events */
165/**************/
166static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
167 struct sk_buff *skb)
168{
169 const void **tb;
170 const struct wmi_tlv_bcn_tx_status_ev *ev;
171 u32 vdev_id, tx_status;
172 int ret;
173
174 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
175 if (IS_ERR(tb)) {
176 ret = PTR_ERR(tb);
177 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
178 return ret;
179 }
180
181 ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
182 if (!ev) {
183 kfree(tb);
184 return -EPROTO;
185 }
186
187 tx_status = __le32_to_cpu(ev->tx_status);
188 vdev_id = __le32_to_cpu(ev->vdev_id);
189
190 switch (tx_status) {
191 case WMI_TLV_BCN_TX_STATUS_OK:
192 break;
193 case WMI_TLV_BCN_TX_STATUS_XRETRY:
194 case WMI_TLV_BCN_TX_STATUS_DROP:
195 case WMI_TLV_BCN_TX_STATUS_FILTERED:
196 /* FIXME: It's probably worth telling mac80211 to stop the
197 * interface as it is crippled.
198 */
199 ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
200 vdev_id, tx_status);
201 break;
202 }
203
204 kfree(tb);
205 return 0;
206}
207
/* Handle a diag data container event: walk the variable-length items in
 * the WMI_TLV_TAG_ARRAY_BYTE payload and forward each to tracing. Items
 * are 4-byte aligned; the event header advertises how many to expect.
 */
static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
					  struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_tlv_diag_data_ev *ev;
	const struct wmi_tlv_diag_item *item;
	const void *data;
	int ret, num_items, len;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
	if (!ev || !data) {
		kfree(tb);
		return -EPROTO;
	}

	num_items = __le32_to_cpu(ev->num_items);
	/* Length of the byte array payload, read from its TLV header. */
	len = ath10k_wmi_tlv_len(data);

	while (num_items--) {
		if (len == 0)
			break;
		if (len < sizeof(*item)) {
			ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
			break;
		}

		item = data;

		/* NOTE(review): this checks the unpadded item length, but
		 * `len` below is decremented by the roundup()'d length, so
		 * a final unpadded item can drive `len` slightly negative;
		 * the int-vs-size_t comparison above would then not stop
		 * the loop — confirm firmware always pads items to 4 bytes.
		 */
		if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
			ath10k_warn(ar, "failed to parse diag data: item is too long\n");
			break;
		}

		trace_ath10k_wmi_diag_container(ar,
						item->type,
						__le32_to_cpu(item->timestamp),
						__le32_to_cpu(item->code),
						__le16_to_cpu(item->len),
						item->payload);

		/* Items are padded to a 4-byte boundary. */
		len -= sizeof(*item);
		len -= roundup(__le16_to_cpu(item->len), 4);

		data += sizeof(*item);
		data += roundup(__le16_to_cpu(item->len), 4);
	}

	/* Both counters must be exhausted simultaneously; anything else
	 * means the event was truncated or malformed.
	 */
	if (num_items != -1 || len != 0)
		ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
			    num_items, len);

	kfree(tb);
	return 0;
}
270
271static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
272 struct sk_buff *skb)
273{
274 const void **tb;
275 const void *data;
276 int ret, len;
277
278 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
279 if (IS_ERR(tb)) {
280 ret = PTR_ERR(tb);
281 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
282 return ret;
283 }
284
285 data = tb[WMI_TLV_TAG_ARRAY_BYTE];
286 if (!data) {
287 kfree(tb);
288 return -EPROTO;
289 }
290 len = ath10k_wmi_tlv_len(data);
291
292 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
293 trace_ath10k_wmi_diag(ar, data, len);
294
295 kfree(tb);
296 return 0;
297}
298
299/***********/
300/* TLV ops */
301/***********/
302
/* Top-level WMI-TLV event dispatcher. Strips the WMI command header from
 * @skb and routes the remaining payload to the matching event handler.
 * Frees @skb on exit, except for mgmt rx events whose handler takes
 * ownership of the buffer.
 */
static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* Drop the event silently if the header can't be stripped. */
	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	switch (id) {
	case WMI_TLV_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_TLV_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_TLV_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_TLV_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_TLV_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_TLV_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_TLV_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_TLV_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_TLV_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_TLV_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_TLV_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_TLV_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_TLV_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_TLV_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_TLV_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_TLV_PDEV_FTM_INTG_EVENTID:
		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
		break;
	case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
		ath10k_wmi_event_gtk_offload_status(ar, skb);
		break;
	case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
		break;
	case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
		ath10k_wmi_event_delba_complete(ar, skb);
		break;
	case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
		ath10k_wmi_event_addba_complete(ar, skb);
		break;
	case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
		break;
	case WMI_TLV_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		break;
	case WMI_TLV_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
		break;
	case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
		ath10k_wmi_tlv_event_diag_data(ar, skb);
		break;
	case WMI_TLV_DIAG_EVENTID:
		ath10k_wmi_tlv_event_diag(ar, skb);
		break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
	}

	dev_kfree_skb(skb);
}
427
428static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
429 struct sk_buff *skb,
430 struct wmi_scan_ev_arg *arg)
431{
432 const void **tb;
433 const struct wmi_scan_event *ev;
434 int ret;
435
436 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
437 if (IS_ERR(tb)) {
438 ret = PTR_ERR(tb);
439 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
440 return ret;
441 }
442
443 ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
444 if (!ev) {
445 kfree(tb);
446 return -EPROTO;
447 }
448
449 arg->event_type = ev->event_type;
450 arg->reason = ev->reason;
451 arg->channel_freq = ev->channel_freq;
452 arg->scan_req_id = ev->scan_req_id;
453 arg->scan_id = ev->scan_id;
454 arg->vdev_id = ev->vdev_id;
455
456 kfree(tb);
457 return 0;
458}
459
/* Extract mgmt rx metadata into @arg and rewrite @skb in place so that
 * skb->data points at the raw 802.11 frame of msdu_len bytes. The
 * trim/put/pull sequence below is order-sensitive; do not reorder.
 */
static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_mgmt_rx_ev_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_mgmt_rx_ev *ev;
	const u8 *frame;
	u32 msdu_len;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];

	if (!ev || !frame) {
		kfree(tb);
		return -EPROTO;
	}

	/* Copied still little-endian; callers convert. */
	arg->channel = ev->channel;
	arg->buf_len = ev->buf_len;
	arg->status = ev->status;
	arg->snr = ev->snr;
	arg->phy_mode = ev->phy_mode;
	arg->rate = ev->rate;

	msdu_len = __le32_to_cpu(arg->buf_len);

	/* Reject events whose advertised frame length overruns the skb. */
	if (skb->len < (frame - skb->data) + msdu_len) {
		kfree(tb);
		return -EPROTO;
	}

	/* shift the sk_buff to point to `frame` */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, msdu_len);

	kfree(tb);
	return 0;
}
508
509static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
510 struct sk_buff *skb,
511 struct wmi_ch_info_ev_arg *arg)
512{
513 const void **tb;
514 const struct wmi_chan_info_event *ev;
515 int ret;
516
517 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
518 if (IS_ERR(tb)) {
519 ret = PTR_ERR(tb);
520 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
521 return ret;
522 }
523
524 ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
525 if (!ev) {
526 kfree(tb);
527 return -EPROTO;
528 }
529
530 arg->err_code = ev->err_code;
531 arg->freq = ev->freq;
532 arg->cmd_flags = ev->cmd_flags;
533 arg->noise_floor = ev->noise_floor;
534 arg->rx_clear_count = ev->rx_clear_count;
535 arg->cycle_count = ev->cycle_count;
536
537 kfree(tb);
538 return 0;
539}
540
541static int
542ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
543 struct wmi_vdev_start_ev_arg *arg)
544{
545 const void **tb;
546 const struct wmi_vdev_start_response_event *ev;
547 int ret;
548
549 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
550 if (IS_ERR(tb)) {
551 ret = PTR_ERR(tb);
552 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
553 return ret;
554 }
555
556 ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
557 if (!ev) {
558 kfree(tb);
559 return -EPROTO;
560 }
561
562 skb_pull(skb, sizeof(*ev));
563 arg->vdev_id = ev->vdev_id;
564 arg->req_id = ev->req_id;
565 arg->resp_type = ev->resp_type;
566 arg->status = ev->status;
567
568 kfree(tb);
569 return 0;
570}
571
572static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
573 struct sk_buff *skb,
574 struct wmi_peer_kick_ev_arg *arg)
575{
576 const void **tb;
577 const struct wmi_peer_sta_kickout_event *ev;
578 int ret;
579
580 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
581 if (IS_ERR(tb)) {
582 ret = PTR_ERR(tb);
583 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
584 return ret;
585 }
586
587 ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
588 if (!ev) {
589 kfree(tb);
590 return -EPROTO;
591 }
592
593 arg->mac_addr = ev->peer_macaddr.addr;
594
595 kfree(tb);
596 return 0;
597}
598
/* Parse state for the host SWBA (software beacon alert) event. */
struct wmi_tlv_swba_parse {
	const struct wmi_host_swba_event *ev; /* fixed event header */
	bool tim_done;   /* first ARRAY_STRUCT (TIM infos) consumed */
	bool noa_done;   /* second ARRAY_STRUCT (NoA infos) consumed */
	size_t n_tim;    /* TIM entries collected so far */
	size_t n_noa;    /* NoA entries collected so far */
	struct wmi_swba_ev_arg *arg; /* output, owned by caller */
};
607
608static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
609 const void *ptr, void *data)
610{
611 struct wmi_tlv_swba_parse *swba = data;
612
613 if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
614 return -EPROTO;
615
616 if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
617 return -ENOBUFS;
618
619 swba->arg->tim_info[swba->n_tim++] = ptr;
620 return 0;
621}
622
623static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
624 const void *ptr, void *data)
625{
626 struct wmi_tlv_swba_parse *swba = data;
627
628 if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
629 return -EPROTO;
630
631 if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
632 return -ENOBUFS;
633
634 swba->arg->noa_info[swba->n_noa++] = ptr;
635 return 0;
636}
637
638static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
639 const void *ptr, void *data)
640{
641 struct wmi_tlv_swba_parse *swba = data;
642 int ret;
643
644 switch (tag) {
645 case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
646 swba->ev = ptr;
647 break;
648 case WMI_TLV_TAG_ARRAY_STRUCT:
649 if (!swba->tim_done) {
650 swba->tim_done = true;
651 ret = ath10k_wmi_tlv_iter(ar, ptr, len,
652 ath10k_wmi_tlv_swba_tim_parse,
653 swba);
654 if (ret)
655 return ret;
656 } else if (!swba->noa_done) {
657 swba->noa_done = true;
658 ret = ath10k_wmi_tlv_iter(ar, ptr, len,
659 ath10k_wmi_tlv_swba_noa_parse,
660 swba);
661 if (ret)
662 return ret;
663 }
664 break;
665 default:
666 break;
667 }
668 return 0;
669}
670
671static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
672 struct sk_buff *skb,
673 struct wmi_swba_ev_arg *arg)
674{
675 struct wmi_tlv_swba_parse swba = { .arg = arg };
676 u32 map;
677 size_t n_vdevs;
678 int ret;
679
680 ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
681 ath10k_wmi_tlv_swba_parse, &swba);
682 if (ret) {
683 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
684 return ret;
685 }
686
687 if (!swba.ev)
688 return -EPROTO;
689
690 arg->vdev_map = swba.ev->vdev_map;
691
692 for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
693 if (map & BIT(0))
694 n_vdevs++;
695
696 if (n_vdevs != swba.n_tim ||
697 n_vdevs != swba.n_noa)
698 return -EPROTO;
699
700 return 0;
701}
702
703static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar,
704 struct sk_buff *skb,
705 struct wmi_phyerr_ev_arg *arg)
706{
707 const void **tb;
708 const struct wmi_tlv_phyerr_ev *ev;
709 const void *phyerrs;
710 int ret;
711
712 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
713 if (IS_ERR(tb)) {
714 ret = PTR_ERR(tb);
715 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
716 return ret;
717 }
718
719 ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
720 phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
721
722 if (!ev || !phyerrs) {
723 kfree(tb);
724 return -EPROTO;
725 }
726
727 arg->num_phyerrs = ev->num_phyerrs;
728 arg->tsf_l32 = ev->tsf_l32;
729 arg->tsf_u32 = ev->tsf_u32;
730 arg->buf_len = ev->buf_len;
731 arg->phyerrs = phyerrs;
732
733 kfree(tb);
734 return 0;
735}
736
/* WMI TLV ABI version namespace words, advertised in the init command and
 * checked against the service-ready event. NOTE(review): the byte values
 * look like little-endian ASCII ("QCA_", "ML") — TODO confirm against
 * firmware headers.
 */
#define WMI_TLV_ABI_VER_NS0 0x5F414351
#define WMI_TLV_ABI_VER_NS1 0x00004C4D
#define WMI_TLV_ABI_VER_NS2 0x00000000
#define WMI_TLV_ABI_VER_NS3 0x00000000

/* ABI version word 0 packs major (top byte) and minor (low 24 bits). */
#define WMI_TLV_ABI_VER0_MAJOR 1
#define WMI_TLV_ABI_VER0_MINOR 0
#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
			  (((WMI_TLV_ABI_VER0_MINOR) <<  0) & 0x00FFFFFF))
#define WMI_TLV_ABI_VER1 53
747
748static int
749ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
750 const void *ptr, void *data)
751{
752 struct wmi_svc_rdy_ev_arg *arg = data;
753 int i;
754
755 if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
756 return -EPROTO;
757
758 for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
759 if (!arg->mem_reqs[i]) {
760 arg->mem_reqs[i] = ptr;
761 return 0;
762 }
763 }
764
765 return -ENOMEM;
766}
767
/* Extract the service-ready event into @arg: hardware capabilities, the
 * service bitmap, and host memory requests. Also enforces the WMI TLV
 * ABI version handshake; an ABI mismatch returns -ENOTSUPP.
 */
static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_svc_rdy_ev_arg *arg)
{
	const void **tb;
	const struct hal_reg_capabilities *reg;
	const struct wmi_tlv_svc_rdy_ev *ev;
	const __le32 *svc_bmap;
	const struct wlan_host_mem_req *mem_reqs;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT];
	reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES];
	svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
	mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT];

	/* All four TLVs are mandatory in this event. */
	if (!ev || !reg || !svc_bmap || !mem_reqs) {
		kfree(tb);
		return -EPROTO;
	}

	/* This is an internal ABI compatibility check for WMI TLV so check it
	 * here instead of the generic WMI code.
	 */
	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
		   __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
		   __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
		   __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
		   __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
		   __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);

	if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
		kfree(tb);
		return -ENOTSUPP;
	}

	/* Fields are copied still little-endian; callers convert. The
	 * service_map pointer aliases skb data, it is not copied.
	 */
	arg->min_tx_power = ev->hw_min_tx_power;
	arg->max_tx_power = ev->hw_max_tx_power;
	arg->ht_cap = ev->ht_cap_info;
	arg->vht_cap = ev->vht_cap_info;
	arg->sw_ver0 = ev->abi.abi_ver0;
	arg->sw_ver1 = ev->abi.abi_ver1;
	arg->fw_build = ev->fw_build_vers;
	arg->phy_capab = ev->phy_capability;
	arg->num_rf_chains = ev->num_rf_chains;
	arg->eeprom_rd = reg->eeprom_rd;
	arg->num_mem_reqs = ev->num_mem_reqs;
	arg->service_map = svc_bmap;
	arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);

	/* Walk the nested host-mem-req array into arg->mem_reqs[]. */
	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
				  ath10k_wmi_tlv_parse_mem_reqs, arg);
	if (ret) {
		kfree(tb);
		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
		return ret;
	}

	kfree(tb);
	return 0;
}
841
842static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
843 struct sk_buff *skb,
844 struct wmi_rdy_ev_arg *arg)
845{
846 const void **tb;
847 const struct wmi_tlv_rdy_ev *ev;
848 int ret;
849
850 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
851 if (IS_ERR(tb)) {
852 ret = PTR_ERR(tb);
853 ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
854 return ret;
855 }
856
857 ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
858 if (!ev) {
859 kfree(tb);
860 return -EPROTO;
861 }
862
863 arg->sw_version = ev->abi.abi_ver0;
864 arg->abi_version = ev->abi.abi_ver1;
865 arg->status = ev->status;
866 arg->mac_addr = ev->mac_addr.addr;
867
868 kfree(tb);
869 return 0;
870}
871
/* Pull firmware stats from a TLV stats event. Currently a stub: it
 * validates the TLVs and reads the counters, but does not populate
 * @stats yet — the WARN_ON below flags any caller until implemented.
 */
static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
					   struct sk_buff *skb,
					   struct ath10k_fw_stats *stats)
{
	const void **tb;
	const struct wmi_stats_event *ev;
	const void *data;
	u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
	size_t data_len;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
	data = tb[WMI_TLV_TAG_ARRAY_BYTE];

	if (!ev || !data) {
		kfree(tb);
		return -EPROTO;
	}

	/* Counters parsed but unused until the stats decoding is written. */
	data_len = ath10k_wmi_tlv_len(data);
	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);

	WARN_ON(1); /* FIXME: not implemented yet */

	kfree(tb);
	return 0;
}
908
909static struct sk_buff *
910ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
911{
912 struct wmi_tlv_pdev_suspend *cmd;
913 struct wmi_tlv *tlv;
914 struct sk_buff *skb;
915
916 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
917 if (!skb)
918 return ERR_PTR(-ENOMEM);
919
920 tlv = (void *)skb->data;
921 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
922 tlv->len = __cpu_to_le16(sizeof(*cmd));
923 cmd = (void *)tlv->value;
924 cmd->opt = __cpu_to_le32(opt);
925
926 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
927 return skb;
928}
929
930static struct sk_buff *
931ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
932{
933 struct wmi_tlv_resume_cmd *cmd;
934 struct wmi_tlv *tlv;
935 struct sk_buff *skb;
936
937 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
938 if (!skb)
939 return ERR_PTR(-ENOMEM);
940
941 tlv = (void *)skb->data;
942 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
943 tlv->len = __cpu_to_le16(sizeof(*cmd));
944 cmd = (void *)tlv->value;
945 cmd->reserved = __cpu_to_le32(0);
946
947 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
948 return skb;
949}
950
951static struct sk_buff *
952ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
953 u16 rd, u16 rd2g, u16 rd5g,
954 u16 ctl2g, u16 ctl5g,
955 enum wmi_dfs_region dfs_reg)
956{
957 struct wmi_tlv_pdev_set_rd_cmd *cmd;
958 struct wmi_tlv *tlv;
959 struct sk_buff *skb;
960
961 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
962 if (!skb)
963 return ERR_PTR(-ENOMEM);
964
965 tlv = (void *)skb->data;
966 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
967 tlv->len = __cpu_to_le16(sizeof(*cmd));
968 cmd = (void *)tlv->value;
969 cmd->regd = __cpu_to_le32(rd);
970 cmd->regd_2ghz = __cpu_to_le32(rd2g);
971 cmd->regd_5ghz = __cpu_to_le32(rd5g);
972 cmd->conform_limit_2ghz = __cpu_to_le32(rd2g);
973 cmd->conform_limit_5ghz = __cpu_to_le32(rd5g);
974
975 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
976 return skb;
977}
978
979static struct sk_buff *
980ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
981 u32 param_value)
982{
983 struct wmi_tlv_pdev_set_param_cmd *cmd;
984 struct wmi_tlv *tlv;
985 struct sk_buff *skb;
986
987 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
988 if (!skb)
989 return ERR_PTR(-ENOMEM);
990
991 tlv = (void *)skb->data;
992 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
993 tlv->len = __cpu_to_le16(sizeof(*cmd));
994 cmd = (void *)tlv->value;
995 cmd->param_id = __cpu_to_le32(param_id);
996 cmd->param_value = __cpu_to_le32(param_value);
997
998 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n");
999 return skb;
1000}
1001
/* Build the WMI init command: three consecutive TLVs — the init command
 * (ABI handshake), the resource config, and the array of host memory
 * chunks previously negotiated via service-ready.
 */
static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
{
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	struct wmi_tlv_init_cmd *cmd;
	struct wmi_tlv_resource_config *cfg;
	struct wmi_host_mem_chunks *chunks;
	size_t len, chunks_len;
	void *ptr;

	chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk);
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + sizeof(*cfg)) +
	      (sizeof(*tlv) + chunks_len);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = skb->data;

	/* TLV 1: init command header. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* TLV 2: resource configuration. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
	tlv->len = __cpu_to_le16(sizeof(*cfg));
	cfg = (void *)tlv->value;
	ptr += sizeof(*tlv);
	ptr += sizeof(*cfg);

	/* TLV 3: host memory chunk array. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(chunks_len);
	chunks = (void *)tlv->value;

	ptr += sizeof(*tlv);
	ptr += chunks_len;

	/* ABI handshake values mirrored back from the service-ready check. */
	cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
	cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
	cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
	cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
	cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
	cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);

	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);

	/* Rx reorder offload needs extra firmware-side peer resources. */
	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
		cfg->num_offload_peers = __cpu_to_le32(3);
		cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
	} else {
		cfg->num_offload_peers = __cpu_to_le32(0);
		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
	}

	cfg->num_peer_keys = __cpu_to_le32(2);
	cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
	cfg->ast_skid_limit = __cpu_to_le32(0x10);
	cfg->tx_chain_mask = __cpu_to_le32(0x7);
	cfg->rx_chain_mask = __cpu_to_le32(0x7);
	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
	cfg->rx_decap_mode = __cpu_to_le32(1);
	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
	cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
	cfg->roam_offload_max_vdev = __cpu_to_le32(3);
	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
	cfg->num_mcast_groups = __cpu_to_le32(0);
	cfg->num_mcast_table_elems = __cpu_to_le32(0);
	cfg->mcast2ucast_mode = __cpu_to_le32(0);
	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
	cfg->num_wds_entries = __cpu_to_le32(0x20);
	cfg->dma_burst_size = __cpu_to_le32(0);
	cfg->mac_aggr_delim = __cpu_to_le32(0);
	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
	cfg->vow_config = __cpu_to_le32(0);
	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
	cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
	cfg->max_frag_entries = __cpu_to_le32(2);
	cfg->num_tdls_vdevs = __cpu_to_le32(1);
	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
	cfg->num_wow_filters = __cpu_to_le32(0x16);
	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);

	ath10k_wmi_put_host_mem_chunks(ar, chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
	return skb;
}
1105
1106static struct sk_buff *
1107ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
1108 const struct wmi_start_scan_arg *arg)
1109{
1110 struct wmi_tlv_start_scan_cmd *cmd;
1111 struct wmi_tlv *tlv;
1112 struct sk_buff *skb;
1113 size_t len, chan_len, ssid_len, bssid_len, ie_len;
1114 __le32 *chans;
1115 struct wmi_ssid *ssids;
1116 struct wmi_mac_addr *addrs;
1117 void *ptr;
1118 int i, ret;
1119
1120 ret = ath10k_wmi_start_scan_verify(arg);
1121 if (ret)
1122 return ERR_PTR(ret);
1123
1124 chan_len = arg->n_channels * sizeof(__le32);
1125 ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
1126 bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
1127 ie_len = roundup(arg->ie_len, 4);
1128 len = (sizeof(*tlv) + sizeof(*cmd)) +
1129 (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
1130 (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
1131 (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
1132 (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
1133
1134 skb = ath10k_wmi_alloc_skb(ar, len);
1135 if (!skb)
1136 return ERR_PTR(-ENOMEM);
1137
1138 ptr = (void *)skb->data;
1139 tlv = ptr;
1140 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
1141 tlv->len = __cpu_to_le16(sizeof(*cmd));
1142 cmd = (void *)tlv->value;
1143
1144 ath10k_wmi_put_start_scan_common(&cmd->common, arg);
1145 cmd->burst_duration_ms = __cpu_to_le32(0);
1146 cmd->num_channels = __cpu_to_le32(arg->n_channels);
1147 cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
1148 cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
1149 cmd->ie_len = __cpu_to_le32(arg->ie_len);
1150 cmd->num_probes = __cpu_to_le32(3);
1151
1152 /* FIXME: There are some scan flag inconsistencies across firmwares,
1153 * e.g. WMI-TLV inverts the logic behind the following flag.
1154 */
1155 cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
1156
1157 ptr += sizeof(*tlv);
1158 ptr += sizeof(*cmd);
1159
1160 tlv = ptr;
1161 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
1162 tlv->len = __cpu_to_le16(chan_len);
1163 chans = (void *)tlv->value;
1164 for (i = 0; i < arg->n_channels; i++)
1165 chans[i] = __cpu_to_le32(arg->channels[i]);
1166
1167 ptr += sizeof(*tlv);
1168 ptr += chan_len;
1169
1170 tlv = ptr;
1171 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
1172 tlv->len = __cpu_to_le16(ssid_len);
1173 ssids = (void *)tlv->value;
1174 for (i = 0; i < arg->n_ssids; i++) {
1175 ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
1176 memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
1177 }
1178
1179 ptr += sizeof(*tlv);
1180 ptr += ssid_len;
1181
1182 tlv = ptr;
1183 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
1184 tlv->len = __cpu_to_le16(bssid_len);
1185 addrs = (void *)tlv->value;
1186 for (i = 0; i < arg->n_bssids; i++)
1187 ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);
1188
1189 ptr += sizeof(*tlv);
1190 ptr += bssid_len;
1191
1192 tlv = ptr;
1193 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
1194 tlv->len = __cpu_to_le16(ie_len);
1195 memcpy(tlv->value, arg->ie, arg->ie_len);
1196
1197 ptr += sizeof(*tlv);
1198 ptr += ie_len;
1199
1200 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
1201 return skb;
1202}
1203
1204static struct sk_buff *
1205ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
1206 const struct wmi_stop_scan_arg *arg)
1207{
1208 struct wmi_stop_scan_cmd *cmd;
1209 struct wmi_tlv *tlv;
1210 struct sk_buff *skb;
1211 u32 scan_id;
1212 u32 req_id;
1213
1214 if (arg->req_id > 0xFFF)
1215 return ERR_PTR(-EINVAL);
1216 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
1217 return ERR_PTR(-EINVAL);
1218
1219 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1220 if (!skb)
1221 return ERR_PTR(-ENOMEM);
1222
1223 scan_id = arg->u.scan_id;
1224 scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
1225
1226 req_id = arg->req_id;
1227 req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
1228
1229 tlv = (void *)skb->data;
1230 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
1231 tlv->len = __cpu_to_le16(sizeof(*cmd));
1232 cmd = (void *)tlv->value;
1233 cmd->req_type = __cpu_to_le32(arg->req_type);
1234 cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
1235 cmd->scan_id = __cpu_to_le32(scan_id);
1236 cmd->scan_req_id = __cpu_to_le32(req_id);
1237
1238 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
1239 return skb;
1240}
1241
1242static struct sk_buff *
1243ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
1244 u32 vdev_id,
1245 enum wmi_vdev_type vdev_type,
1246 enum wmi_vdev_subtype vdev_subtype,
1247 const u8 mac_addr[ETH_ALEN])
1248{
1249 struct wmi_vdev_create_cmd *cmd;
1250 struct wmi_tlv *tlv;
1251 struct sk_buff *skb;
1252
1253 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1254 if (!skb)
1255 return ERR_PTR(-ENOMEM);
1256
1257 tlv = (void *)skb->data;
1258 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
1259 tlv->len = __cpu_to_le16(sizeof(*cmd));
1260 cmd = (void *)tlv->value;
1261 cmd->vdev_id = __cpu_to_le32(vdev_id);
1262 cmd->vdev_type = __cpu_to_le32(vdev_type);
1263 cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
1264 ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
1265
1266 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
1267 return skb;
1268}
1269
1270static struct sk_buff *
1271ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
1272{
1273 struct wmi_vdev_delete_cmd *cmd;
1274 struct wmi_tlv *tlv;
1275 struct sk_buff *skb;
1276
1277 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1278 if (!skb)
1279 return ERR_PTR(-ENOMEM);
1280
1281 tlv = (void *)skb->data;
1282 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
1283 tlv->len = __cpu_to_le16(sizeof(*cmd));
1284 cmd = (void *)tlv->value;
1285 cmd->vdev_id = __cpu_to_le32(vdev_id);
1286
1287 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
1288 return skb;
1289}
1290
/* Build a WMI-TLV vdev-start command skb.
 *
 * Layout: fixed command TLV, channel TLV, then an empty p2p NoA
 * descriptor array TLV (header only).
 *
 * NOTE(review): @restart is currently unused in the TLV variant -
 * confirm whether the firmware distinguishes start vs restart here.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
				 const struct wmi_vdev_start_request_arg *arg,
				 bool restart)
{
	struct wmi_tlv_vdev_start_cmd *cmd;
	struct wmi_channel *ch;
	struct wmi_p2p_noa_descriptor *noa;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;
	u32 flags = 0;

	/* A hidden SSID requires an SSID, and the SSID must fit in the
	 * fixed-size command field.
	 */
	if (WARN_ON(arg->ssid && arg->ssid_len == 0))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return ERR_PTR(-EINVAL);

	/* command TLV + channel TLV + empty NoA array TLV (header only) */
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + sizeof(*ch)) +
	      (sizeof(*tlv) + 0);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	ptr = (void *)skb->data;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
	cmd->flags = __cpu_to_le32(flags);
	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);

	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
	tlv->len = __cpu_to_le16(sizeof(*ch));
	ch = (void *)tlv->value;
	ath10k_wmi_put_wmi_channel(ch, &arg->channel);

	ptr += sizeof(*tlv);
	ptr += sizeof(*ch);

	/* empty NoA descriptor array; only the TLV header is sent */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = 0;
	noa = (void *)tlv->value;

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);
	ptr += 0;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
	return skb;
}
1370
1371static struct sk_buff *
1372ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
1373{
1374 struct wmi_vdev_stop_cmd *cmd;
1375 struct wmi_tlv *tlv;
1376 struct sk_buff *skb;
1377
1378 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1379 if (!skb)
1380 return ERR_PTR(-ENOMEM);
1381
1382 tlv = (void *)skb->data;
1383 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
1384 tlv->len = __cpu_to_le16(sizeof(*cmd));
1385 cmd = (void *)tlv->value;
1386 cmd->vdev_id = __cpu_to_le32(vdev_id);
1387
1388 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
1389 return skb;
1390}
1391
1392static struct sk_buff *
1393ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
1394 const u8 *bssid)
1395
1396{
1397 struct wmi_vdev_up_cmd *cmd;
1398 struct wmi_tlv *tlv;
1399 struct sk_buff *skb;
1400
1401 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1402 if (!skb)
1403 return ERR_PTR(-ENOMEM);
1404
1405 tlv = (void *)skb->data;
1406 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
1407 tlv->len = __cpu_to_le16(sizeof(*cmd));
1408 cmd = (void *)tlv->value;
1409 cmd->vdev_id = __cpu_to_le32(vdev_id);
1410 cmd->vdev_assoc_id = __cpu_to_le32(aid);
1411 ether_addr_copy(cmd->vdev_bssid.addr, bssid);
1412
1413 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
1414 return skb;
1415}
1416
1417static struct sk_buff *
1418ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
1419{
1420 struct wmi_vdev_down_cmd *cmd;
1421 struct wmi_tlv *tlv;
1422 struct sk_buff *skb;
1423
1424 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1425 if (!skb)
1426 return ERR_PTR(-ENOMEM);
1427
1428 tlv = (void *)skb->data;
1429 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
1430 tlv->len = __cpu_to_le16(sizeof(*cmd));
1431 cmd = (void *)tlv->value;
1432 cmd->vdev_id = __cpu_to_le32(vdev_id);
1433
1434 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
1435 return skb;
1436}
1437
1438static struct sk_buff *
1439ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
1440 u32 param_id, u32 param_value)
1441{
1442 struct wmi_vdev_set_param_cmd *cmd;
1443 struct wmi_tlv *tlv;
1444 struct sk_buff *skb;
1445
1446 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1447 if (!skb)
1448 return ERR_PTR(-ENOMEM);
1449
1450 tlv = (void *)skb->data;
1451 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
1452 tlv->len = __cpu_to_le16(sizeof(*cmd));
1453 cmd = (void *)tlv->value;
1454 cmd->vdev_id = __cpu_to_le32(vdev_id);
1455 cmd->param_id = __cpu_to_le32(param_id);
1456 cmd->param_value = __cpu_to_le32(param_value);
1457
1458 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n");
1459 return skb;
1460}
1461
/* Build a WMI-TLV install-key command skb.
 *
 * Layout: fixed command TLV followed by a byte-array TLV holding the key
 * material, padded up to a 4-byte boundary.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
				       const struct wmi_vdev_install_key_arg *arg)
{
	struct wmi_vdev_install_key_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;

	/* key data must be present exactly when a real cipher is used */
	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
		return ERR_PTR(-EINVAL);
	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
		return ERR_PTR(-EINVAL);

	/* command TLV + key-data TLV (length padded to 4 bytes) */
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->key_idx = __cpu_to_le32(arg->key_idx);
	cmd->key_flags = __cpu_to_le32(arg->key_flags);
	cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
	cmd->key_len = __cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);

	/* peer address is optional (e.g. group keys) */
	if (arg->macaddr)
		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* key material; only key_len bytes are copied, padding stays zeroed */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
	if (arg->key_data)
		memcpy(tlv->value, arg->key_data, arg->key_len);

	ptr += sizeof(*tlv);
	ptr += roundup(arg->key_len, sizeof(__le32));

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
	return skb;
}
1514
1515static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
1516 const struct wmi_sta_uapsd_auto_trig_arg *arg)
1517{
1518 struct wmi_sta_uapsd_auto_trig_param *ac;
1519 struct wmi_tlv *tlv;
1520
1521 tlv = ptr;
1522 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
1523 tlv->len = __cpu_to_le16(sizeof(*ac));
1524 ac = (void *)tlv->value;
1525
1526 ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
1527 ac->user_priority = __cpu_to_le32(arg->user_priority);
1528 ac->service_interval = __cpu_to_le32(arg->service_interval);
1529 ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
1530 ac->delay_interval = __cpu_to_le32(arg->delay_interval);
1531
1532 ath10k_dbg(ar, ATH10K_DBG_WMI,
1533 "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
1534 ac->wmm_ac, ac->user_priority, ac->service_interval,
1535 ac->suspend_interval, ac->delay_interval);
1536
1537 return ptr + sizeof(*tlv) + sizeof(*ac);
1538}
1539
/* Build a WMI-TLV STA UAPSD auto-trigger command skb.
 *
 * Layout: fixed command TLV followed by an array TLV that contains one
 * nested auto-trigger parameter TLV per access category in @args.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
				     const u8 peer_addr[ETH_ALEN],
				     const struct wmi_sta_uapsd_auto_trig_arg *args,
				     u32 num_ac)
{
	struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
	struct wmi_sta_uapsd_auto_trig_param *ac;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	size_t ac_tlv_len;
	void *ptr;
	int i;

	/* each AC entry carries its own nested TLV header */
	ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + ac_tlv_len;
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->num_ac = __cpu_to_le32(num_ac);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(ac_tlv_len);
	/* ac is only used for sizeof above; this assignment is unused */
	ac = (void *)tlv->value;

	/* the nested per-AC TLVs are written by the helper */
	ptr += sizeof(*tlv);
	for (i = 0; i < num_ac; i++)
		ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
	return skb;
}
1586
1587static void *ath10k_wmi_tlv_put_wmm(void *ptr,
1588 const struct wmi_wmm_params_arg *arg)
1589{
1590 struct wmi_wmm_params *wmm;
1591 struct wmi_tlv *tlv;
1592
1593 tlv = ptr;
1594 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
1595 tlv->len = __cpu_to_le16(sizeof(*wmm));
1596 wmm = (void *)tlv->value;
1597 ath10k_wmi_set_wmm_param(wmm, arg);
1598
1599 return ptr + sizeof(*tlv) + sizeof(*wmm);
1600}
1601
1602static struct sk_buff *
1603ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
1604 const struct wmi_wmm_params_all_arg *arg)
1605{
1606 struct wmi_tlv_vdev_set_wmm_cmd *cmd;
1607 struct wmi_wmm_params *wmm;
1608 struct wmi_tlv *tlv;
1609 struct sk_buff *skb;
1610 size_t len;
1611 void *ptr;
1612
1613 len = (sizeof(*tlv) + sizeof(*cmd)) +
1614 (4 * (sizeof(*tlv) + sizeof(*wmm)));
1615 skb = ath10k_wmi_alloc_skb(ar, len);
1616 if (!skb)
1617 return ERR_PTR(-ENOMEM);
1618
1619 ptr = (void *)skb->data;
1620 tlv = ptr;
1621 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
1622 tlv->len = __cpu_to_le16(sizeof(*cmd));
1623 cmd = (void *)tlv->value;
1624 cmd->vdev_id = __cpu_to_le32(vdev_id);
1625
1626 ptr += sizeof(*tlv);
1627 ptr += sizeof(*cmd);
1628
1629 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
1630 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
1631 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
1632 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
1633
1634 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
1635 return skb;
1636}
1637
1638static struct sk_buff *
1639ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
1640 const struct wmi_sta_keepalive_arg *arg)
1641{
1642 struct wmi_tlv_sta_keepalive_cmd *cmd;
1643 struct wmi_sta_keepalive_arp_resp *arp;
1644 struct sk_buff *skb;
1645 struct wmi_tlv *tlv;
1646 void *ptr;
1647 size_t len;
1648
1649 len = sizeof(*tlv) + sizeof(*cmd) +
1650 sizeof(*tlv) + sizeof(*arp);
1651 skb = ath10k_wmi_alloc_skb(ar, len);
1652 if (!skb)
1653 return ERR_PTR(-ENOMEM);
1654
1655 ptr = (void *)skb->data;
1656 tlv = ptr;
1657 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
1658 tlv->len = __cpu_to_le16(sizeof(*cmd));
1659 cmd = (void *)tlv->value;
1660 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
1661 cmd->enabled = __cpu_to_le32(arg->enabled);
1662 cmd->method = __cpu_to_le32(arg->method);
1663 cmd->interval = __cpu_to_le32(arg->interval);
1664
1665 ptr += sizeof(*tlv);
1666 ptr += sizeof(*cmd);
1667
1668 tlv = ptr;
1669 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
1670 tlv->len = __cpu_to_le16(sizeof(*arp));
1671 arp = (void *)tlv->value;
1672
1673 arp->src_ip4_addr = arg->src_ip4_addr;
1674 arp->dest_ip4_addr = arg->dest_ip4_addr;
1675 ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
1676
1677 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d inverval %d\n",
1678 arg->vdev_id, arg->enabled, arg->method, arg->interval);
1679 return skb;
1680}
1681
1682static struct sk_buff *
1683ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
1684 const u8 peer_addr[ETH_ALEN])
1685{
1686 struct wmi_tlv_peer_create_cmd *cmd;
1687 struct wmi_tlv *tlv;
1688 struct sk_buff *skb;
1689
1690 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1691 if (!skb)
1692 return ERR_PTR(-ENOMEM);
1693
1694 tlv = (void *)skb->data;
1695 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
1696 tlv->len = __cpu_to_le16(sizeof(*cmd));
1697 cmd = (void *)tlv->value;
1698 cmd->vdev_id = __cpu_to_le32(vdev_id);
1699 cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
1700 ether_addr_copy(cmd->peer_addr.addr, peer_addr);
1701
1702 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
1703 return skb;
1704}
1705
1706static struct sk_buff *
1707ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
1708 const u8 peer_addr[ETH_ALEN])
1709{
1710 struct wmi_peer_delete_cmd *cmd;
1711 struct wmi_tlv *tlv;
1712 struct sk_buff *skb;
1713
1714 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1715 if (!skb)
1716 return ERR_PTR(-ENOMEM);
1717
1718 tlv = (void *)skb->data;
1719 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
1720 tlv->len = __cpu_to_le16(sizeof(*cmd));
1721 cmd = (void *)tlv->value;
1722 cmd->vdev_id = __cpu_to_le32(vdev_id);
1723 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1724
1725 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
1726 return skb;
1727}
1728
1729static struct sk_buff *
1730ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
1731 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
1732{
1733 struct wmi_peer_flush_tids_cmd *cmd;
1734 struct wmi_tlv *tlv;
1735 struct sk_buff *skb;
1736
1737 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1738 if (!skb)
1739 return ERR_PTR(-ENOMEM);
1740
1741 tlv = (void *)skb->data;
1742 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
1743 tlv->len = __cpu_to_le16(sizeof(*cmd));
1744 cmd = (void *)tlv->value;
1745 cmd->vdev_id = __cpu_to_le32(vdev_id);
1746 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
1747 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1748
1749 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
1750 return skb;
1751}
1752
1753static struct sk_buff *
1754ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
1755 const u8 *peer_addr,
1756 enum wmi_peer_param param_id,
1757 u32 param_value)
1758{
1759 struct wmi_peer_set_param_cmd *cmd;
1760 struct wmi_tlv *tlv;
1761 struct sk_buff *skb;
1762
1763 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1764 if (!skb)
1765 return ERR_PTR(-ENOMEM);
1766
1767 tlv = (void *)skb->data;
1768 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
1769 tlv->len = __cpu_to_le16(sizeof(*cmd));
1770 cmd = (void *)tlv->value;
1771 cmd->vdev_id = __cpu_to_le32(vdev_id);
1772 cmd->param_id = __cpu_to_le32(param_id);
1773 cmd->param_value = __cpu_to_le32(param_value);
1774 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1775
1776 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n");
1777 return skb;
1778}
1779
/* Build a WMI-TLV peer-assoc-complete command skb.
 *
 * Layout: fixed command TLV, legacy-rates byte array TLV, HT-rates byte
 * array TLV, then a VHT rate-set TLV.  Rate array lengths are padded to
 * 4-byte boundaries; only num_rates bytes are copied into each.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
				 const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_tlv_peer_assoc_cmd *cmd;
	struct wmi_vht_rate_set *vht_rate;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len, legacy_rate_len, ht_rate_len;
	void *ptr;

	/* bounds-check caller-supplied counts before sizing the buffer */
	if (arg->peer_mpdu_density > 16)
		return ERR_PTR(-EINVAL);
	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
		return ERR_PTR(-EINVAL);
	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
		return ERR_PTR(-EINVAL);

	legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
				  sizeof(__le32));
	ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + legacy_rate_len) +
	      (sizeof(*tlv) + ht_rate_len) +
	      (sizeof(*tlv) + sizeof(*vht_rate));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
	cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
	cmd->flags = __cpu_to_le32(arg->peer_flags);
	cmd->caps = __cpu_to_le32(arg->peer_caps);
	cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
	cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
	cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
	cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
	cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
	cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
	cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
	cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
	cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
	cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
	ether_addr_copy(cmd->mac_addr.addr, arg->addr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* legacy rates (padded byte array) */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(legacy_rate_len);
	memcpy(tlv->value, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	ptr += sizeof(*tlv);
	ptr += legacy_rate_len;

	/* HT rates (padded byte array) */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(ht_rate_len);
	memcpy(tlv->value, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	ptr += sizeof(*tlv);
	ptr += ht_rate_len;

	/* VHT rate set */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
	tlv->len = __cpu_to_le16(sizeof(*vht_rate));
	vht_rate = (void *)tlv->value;

	vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
	vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
	vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
	vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);

	ptr += sizeof(*tlv);
	ptr += sizeof(*vht_rate);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
	return skb;
}
1869
1870static struct sk_buff *
1871ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
1872 enum wmi_sta_ps_mode psmode)
1873{
1874 struct wmi_sta_powersave_mode_cmd *cmd;
1875 struct wmi_tlv *tlv;
1876 struct sk_buff *skb;
1877
1878 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1879 if (!skb)
1880 return ERR_PTR(-ENOMEM);
1881
1882 tlv = (void *)skb->data;
1883 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
1884 tlv->len = __cpu_to_le16(sizeof(*cmd));
1885 cmd = (void *)tlv->value;
1886 cmd->vdev_id = __cpu_to_le32(vdev_id);
1887 cmd->sta_ps_mode = __cpu_to_le32(psmode);
1888
1889 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
1890 return skb;
1891}
1892
1893static struct sk_buff *
1894ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
1895 enum wmi_sta_powersave_param param_id,
1896 u32 param_value)
1897{
1898 struct wmi_sta_powersave_param_cmd *cmd;
1899 struct wmi_tlv *tlv;
1900 struct sk_buff *skb;
1901
1902 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1903 if (!skb)
1904 return ERR_PTR(-ENOMEM);
1905
1906 tlv = (void *)skb->data;
1907 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
1908 tlv->len = __cpu_to_le16(sizeof(*cmd));
1909 cmd = (void *)tlv->value;
1910 cmd->vdev_id = __cpu_to_le32(vdev_id);
1911 cmd->param_id = __cpu_to_le32(param_id);
1912 cmd->param_value = __cpu_to_le32(param_value);
1913
1914 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
1915 return skb;
1916}
1917
1918static struct sk_buff *
1919ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1920 enum wmi_ap_ps_peer_param param_id, u32 value)
1921{
1922 struct wmi_ap_ps_peer_cmd *cmd;
1923 struct wmi_tlv *tlv;
1924 struct sk_buff *skb;
1925
1926 if (!mac)
1927 return ERR_PTR(-EINVAL);
1928
1929 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
1930 if (!skb)
1931 return ERR_PTR(-ENOMEM);
1932
1933 tlv = (void *)skb->data;
1934 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
1935 tlv->len = __cpu_to_le16(sizeof(*cmd));
1936 cmd = (void *)tlv->value;
1937 cmd->vdev_id = __cpu_to_le32(vdev_id);
1938 cmd->param_id = __cpu_to_le32(param_id);
1939 cmd->param_value = __cpu_to_le32(value);
1940 ether_addr_copy(cmd->peer_macaddr.addr, mac);
1941
1942 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
1943 return skb;
1944}
1945
/* Build a WMI-TLV scan channel-list command skb.
 *
 * Layout: fixed command TLV followed by an array TLV whose payload is a
 * sequence of nested channel TLVs, one per entry in @arg->channels.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
				     const struct wmi_scan_chan_list_arg *arg)
{
	struct wmi_tlv_scan_chan_list_cmd *cmd;
	struct wmi_channel *ci;
	struct wmi_channel_arg *ch;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t chans_len, len;
	int i;
	void *ptr, *chans;

	/* each channel entry carries its own nested TLV header */
	chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + chans_len);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* outer array TLV wrapping the nested channel TLVs */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(chans_len);
	chans = (void *)tlv->value;

	for (i = 0; i < arg->n_channels; i++) {
		ch = &arg->channels[i];

		tlv = chans;
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
		tlv->len = __cpu_to_le16(sizeof(*ci));
		ci = (void *)tlv->value;

		ath10k_wmi_put_wmi_channel(ci, ch);

		chans += sizeof(*tlv);
		chans += sizeof(*ci);
	}

	ptr += sizeof(*tlv);
	ptr += chans_len;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
	return skb;
}
2002
2003static struct sk_buff *
2004ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
2005 const void *bcn, size_t bcn_len,
2006 u32 bcn_paddr, bool dtim_zero,
2007 bool deliver_cab)
2008
2009{
2010 struct wmi_bcn_tx_ref_cmd *cmd;
2011 struct wmi_tlv *tlv;
2012 struct sk_buff *skb;
2013 struct ieee80211_hdr *hdr;
2014 u16 fc;
2015
2016 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2017 if (!skb)
2018 return ERR_PTR(-ENOMEM);
2019
2020 hdr = (struct ieee80211_hdr *)bcn;
2021 fc = le16_to_cpu(hdr->frame_control);
2022
2023 tlv = (void *)skb->data;
2024 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
2025 tlv->len = __cpu_to_le16(sizeof(*cmd));
2026 cmd = (void *)tlv->value;
2027 cmd->vdev_id = __cpu_to_le32(vdev_id);
2028 cmd->data_len = __cpu_to_le32(bcn_len);
2029 cmd->data_ptr = __cpu_to_le32(bcn_paddr);
2030 cmd->msdu_id = 0;
2031 cmd->frame_control = __cpu_to_le32(fc);
2032 cmd->flags = 0;
2033
2034 if (dtim_zero)
2035 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
2036
2037 if (deliver_cab)
2038 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
2039
2040 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
2041 return skb;
2042}
2043
2044static struct sk_buff *
2045ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
2046 const struct wmi_wmm_params_all_arg *arg)
2047{
2048 struct wmi_tlv_pdev_set_wmm_cmd *cmd;
2049 struct wmi_wmm_params *wmm;
2050 struct wmi_tlv *tlv;
2051 struct sk_buff *skb;
2052 size_t len;
2053 void *ptr;
2054
2055 len = (sizeof(*tlv) + sizeof(*cmd)) +
2056 (4 * (sizeof(*tlv) + sizeof(*wmm)));
2057 skb = ath10k_wmi_alloc_skb(ar, len);
2058 if (!skb)
2059 return ERR_PTR(-ENOMEM);
2060
2061 ptr = (void *)skb->data;
2062
2063 tlv = ptr;
2064 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
2065 tlv->len = __cpu_to_le16(sizeof(*cmd));
2066 cmd = (void *)tlv->value;
2067
2068 /* nothing to set here */
2069
2070 ptr += sizeof(*tlv);
2071 ptr += sizeof(*cmd);
2072
2073 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
2074 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
2075 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
2076 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
2077
2078 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
2079 return skb;
2080}
2081
2082static struct sk_buff *
2083ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar,
2084 enum wmi_stats_id stats_id)
2085{
2086 struct wmi_request_stats_cmd *cmd;
2087 struct wmi_tlv *tlv;
2088 struct sk_buff *skb;
2089
2090 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2091 if (!skb)
2092 return ERR_PTR(-ENOMEM);
2093
2094 tlv = (void *)skb->data;
2095 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
2096 tlv->len = __cpu_to_le16(sizeof(*cmd));
2097 cmd = (void *)tlv->value;
2098 cmd->stats_id = __cpu_to_le32(stats_id);
2099
2100 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
2101 return skb;
2102}
2103
2104static struct sk_buff *
2105ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
2106 enum wmi_force_fw_hang_type type,
2107 u32 delay_ms)
2108{
2109 struct wmi_force_fw_hang_cmd *cmd;
2110 struct wmi_tlv *tlv;
2111 struct sk_buff *skb;
2112
2113 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2114 if (!skb)
2115 return ERR_PTR(-ENOMEM);
2116
2117 tlv = (void *)skb->data;
2118 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
2119 tlv->len = __cpu_to_le16(sizeof(*cmd));
2120 cmd = (void *)tlv->value;
2121 cmd->type = __cpu_to_le32(type);
2122 cmd->delay_ms = __cpu_to_le32(delay_ms);
2123
2124 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
2125 return skb;
2126}
2127
2128static struct sk_buff *
2129ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
2130 u32 log_level) {
2131 struct wmi_tlv_dbglog_cmd *cmd;
2132 struct wmi_tlv *tlv;
2133 struct sk_buff *skb;
2134 size_t len, bmap_len;
2135 u32 value;
2136 void *ptr;
2137
2138 if (module_enable) {
2139 value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
2140 module_enable,
2141 WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
2142 } else {
2143 value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
2144 WMI_TLV_DBGLOG_ALL_MODULES,
2145 WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
2146 }
2147
2148 bmap_len = 0;
2149 len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
2150 skb = ath10k_wmi_alloc_skb(ar, len);
2151 if (!skb)
2152 return ERR_PTR(-ENOMEM);
2153
2154 ptr = (void *)skb->data;
2155
2156 tlv = ptr;
2157 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
2158 tlv->len = __cpu_to_le16(sizeof(*cmd));
2159 cmd = (void *)tlv->value;
2160 cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
2161 cmd->value = __cpu_to_le32(value);
2162
2163 ptr += sizeof(*tlv);
2164 ptr += sizeof(*cmd);
2165
2166 tlv = ptr;
2167 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
2168 tlv->len = __cpu_to_le16(bmap_len);
2169
2170 /* nothing to do here */
2171
2172 ptr += sizeof(*tlv);
2173 ptr += sizeof(bmap_len);
2174
2175 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
2176 return skb;
2177}
2178
2179static struct sk_buff *
2180ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
2181{
2182 struct wmi_tlv_pktlog_enable *cmd;
2183 struct wmi_tlv *tlv;
2184 struct sk_buff *skb;
2185 void *ptr;
2186 size_t len;
2187
2188 len = sizeof(*tlv) + sizeof(*cmd);
2189 skb = ath10k_wmi_alloc_skb(ar, len);
2190 if (!skb)
2191 return ERR_PTR(-ENOMEM);
2192
2193 ptr = (void *)skb->data;
2194 tlv = ptr;
2195 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
2196 tlv->len = __cpu_to_le16(sizeof(*cmd));
2197 cmd = (void *)tlv->value;
2198 cmd->filter = __cpu_to_le32(filter);
2199
2200 ptr += sizeof(*tlv);
2201 ptr += sizeof(*cmd);
2202
2203 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
2204 filter);
2205 return skb;
2206}
2207
2208static struct sk_buff *
2209ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
2210{
2211 struct wmi_tlv_pktlog_disable *cmd;
2212 struct wmi_tlv *tlv;
2213 struct sk_buff *skb;
2214 void *ptr;
2215 size_t len;
2216
2217 len = sizeof(*tlv) + sizeof(*cmd);
2218 skb = ath10k_wmi_alloc_skb(ar, len);
2219 if (!skb)
2220 return ERR_PTR(-ENOMEM);
2221
2222 ptr = (void *)skb->data;
2223 tlv = ptr;
2224 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
2225 tlv->len = __cpu_to_le16(sizeof(*cmd));
2226 cmd = (void *)tlv->value;
2227
2228 ptr += sizeof(*tlv);
2229 ptr += sizeof(*cmd);
2230
2231 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
2232 return skb;
2233}
2234
2235static struct sk_buff *
2236ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
2237 u32 tim_ie_offset, struct sk_buff *bcn,
2238 u32 prb_caps, u32 prb_erp, void *prb_ies,
2239 size_t prb_ies_len)
2240{
2241 struct wmi_tlv_bcn_tmpl_cmd *cmd;
2242 struct wmi_tlv_bcn_prb_info *info;
2243 struct wmi_tlv *tlv;
2244 struct sk_buff *skb;
2245 void *ptr;
2246 size_t len;
2247
2248 if (WARN_ON(prb_ies_len > 0 && !prb_ies))
2249 return ERR_PTR(-EINVAL);
2250
2251 len = sizeof(*tlv) + sizeof(*cmd) +
2252 sizeof(*tlv) + sizeof(*info) + prb_ies_len +
2253 sizeof(*tlv) + roundup(bcn->len, 4);
2254 skb = ath10k_wmi_alloc_skb(ar, len);
2255 if (!skb)
2256 return ERR_PTR(-ENOMEM);
2257
2258 ptr = (void *)skb->data;
2259 tlv = ptr;
2260 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
2261 tlv->len = __cpu_to_le16(sizeof(*cmd));
2262 cmd = (void *)tlv->value;
2263 cmd->vdev_id = __cpu_to_le32(vdev_id);
2264 cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
2265 cmd->buf_len = __cpu_to_le32(bcn->len);
2266
2267 ptr += sizeof(*tlv);
2268 ptr += sizeof(*cmd);
2269
2270 /* FIXME: prb_ies_len should be probably aligned to 4byte boundary but
2271 * then it is then impossible to pass original ie len.
2272 * This chunk is not used yet so if setting probe resp template yields
2273 * problems with beaconing or crashes firmware look here.
2274 */
2275 tlv = ptr;
2276 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
2277 tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
2278 info = (void *)tlv->value;
2279 info->caps = __cpu_to_le32(prb_caps);
2280 info->erp = __cpu_to_le32(prb_erp);
2281 memcpy(info->ies, prb_ies, prb_ies_len);
2282
2283 ptr += sizeof(*tlv);
2284 ptr += sizeof(*info);
2285 ptr += prb_ies_len;
2286
2287 tlv = ptr;
2288 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2289 tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
2290 memcpy(tlv->value, bcn->data, bcn->len);
2291
2292 /* FIXME: Adjust TSF? */
2293
2294 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
2295 vdev_id);
2296 return skb;
2297}
2298
2299static struct sk_buff *
2300ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
2301 struct sk_buff *prb)
2302{
2303 struct wmi_tlv_prb_tmpl_cmd *cmd;
2304 struct wmi_tlv_bcn_prb_info *info;
2305 struct wmi_tlv *tlv;
2306 struct sk_buff *skb;
2307 void *ptr;
2308 size_t len;
2309
2310 len = sizeof(*tlv) + sizeof(*cmd) +
2311 sizeof(*tlv) + sizeof(*info) +
2312 sizeof(*tlv) + roundup(prb->len, 4);
2313 skb = ath10k_wmi_alloc_skb(ar, len);
2314 if (!skb)
2315 return ERR_PTR(-ENOMEM);
2316
2317 ptr = (void *)skb->data;
2318 tlv = ptr;
2319 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
2320 tlv->len = __cpu_to_le16(sizeof(*cmd));
2321 cmd = (void *)tlv->value;
2322 cmd->vdev_id = __cpu_to_le32(vdev_id);
2323 cmd->buf_len = __cpu_to_le32(prb->len);
2324
2325 ptr += sizeof(*tlv);
2326 ptr += sizeof(*cmd);
2327
2328 tlv = ptr;
2329 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
2330 tlv->len = __cpu_to_le16(sizeof(*info));
2331 info = (void *)tlv->value;
2332 info->caps = 0;
2333 info->erp = 0;
2334
2335 ptr += sizeof(*tlv);
2336 ptr += sizeof(*info);
2337
2338 tlv = ptr;
2339 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2340 tlv->len = __cpu_to_le16(roundup(prb->len, 4));
2341 memcpy(tlv->value, prb->data, prb->len);
2342
2343 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
2344 vdev_id);
2345 return skb;
2346}
2347
2348static struct sk_buff *
2349ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
2350 const u8 *p2p_ie)
2351{
2352 struct wmi_tlv_p2p_go_bcn_ie *cmd;
2353 struct wmi_tlv *tlv;
2354 struct sk_buff *skb;
2355 void *ptr;
2356 size_t len;
2357
2358 len = sizeof(*tlv) + sizeof(*cmd) +
2359 sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
2360 skb = ath10k_wmi_alloc_skb(ar, len);
2361 if (!skb)
2362 return ERR_PTR(-ENOMEM);
2363
2364 ptr = (void *)skb->data;
2365 tlv = ptr;
2366 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
2367 tlv->len = __cpu_to_le16(sizeof(*cmd));
2368 cmd = (void *)tlv->value;
2369 cmd->vdev_id = __cpu_to_le32(vdev_id);
2370 cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
2371
2372 ptr += sizeof(*tlv);
2373 ptr += sizeof(*cmd);
2374
2375 tlv = ptr;
2376 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2377 tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
2378 memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
2379
2380 ptr += sizeof(*tlv);
2381 ptr += roundup(p2p_ie[1] + 2, 4);
2382
2383 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
2384 vdev_id);
2385 return skb;
2386}
2387
2388/****************/
2389/* TLV mappings */
2390/****************/
2391
2392static struct wmi_cmd_map wmi_tlv_cmd_map = {
2393 .init_cmdid = WMI_TLV_INIT_CMDID,
2394 .start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
2395 .stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
2396 .scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
2397 .scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
2398 .pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
2399 .pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
2400 .pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
2401 .pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
2402 .pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
2403 .pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
2404 .pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
2405 .pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
2406 .pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
2407 .pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
2408 .pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
2409 .pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
2410 .pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
2411 .vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
2412 .vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
2413 .vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
2414 .vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
2415 .vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
2416 .vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
2417 .vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
2418 .vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
2419 .vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
2420 .peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
2421 .peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
2422 .peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
2423 .peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
2424 .peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
2425 .peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
2426 .peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
2427 .peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
2428 .bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
2429 .pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
2430 .bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
2431 .bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
2432 .prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
2433 .mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
2434 .prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
2435 .addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
2436 .addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
2437 .addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
2438 .delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
2439 .addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
2440 .send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
2441 .sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
2442 .sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
2443 .sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
2444 .pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
2445 .pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
2446 .roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
2447 .roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
2448 .roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
2449 .roam_scan_rssi_change_threshold =
2450 WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
2451 .roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
2452 .ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
2453 .ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
2454 .ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
2455 .p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
2456 .p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
2457 .p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
2458 .p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
2459 .p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
2460 .ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
2461 .ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
2462 .peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
2463 .wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
2464 .wlan_profile_set_hist_intvl_cmdid =
2465 WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
2466 .wlan_profile_get_profile_data_cmdid =
2467 WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
2468 .wlan_profile_enable_profile_id_cmdid =
2469 WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
2470 .wlan_profile_list_profile_id_cmdid =
2471 WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
2472 .pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
2473 .pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
2474 .add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
2475 .rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
2476 .wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
2477 .wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
2478 .wow_enable_disable_wake_event_cmdid =
2479 WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
2480 .wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
2481 .wow_hostwakeup_from_sleep_cmdid =
2482 WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
2483 .rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
2484 .rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
2485 .vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
2486 .vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
2487 .request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
2488 .set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
2489 .network_list_offload_config_cmdid =
2490 WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
2491 .gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
2492 .csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
2493 .csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
2494 .chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
2495 .peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
2496 .peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
2497 .sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
2498 .sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
2499 .sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
2500 .echo_cmdid = WMI_TLV_ECHO_CMDID,
2501 .pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
2502 .dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
2503 .pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
2504 .pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
2505 .vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
2506 .vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
2507 .force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
2508 .gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
2509 .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
2510 .pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
2511 .vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
2512};
2513
2514static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
2515 .tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
2516 .rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
2517 .txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
2518 .txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
2519 .txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
2520 .beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
2521 .beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
2522 .resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
2523 .protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
2524 .dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
2525 .non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
2526 .agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
2527 .sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
2528 .ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
2529 .ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
2530 .ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
2531 .ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
2532 .ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
2533 .ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
2534 .ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
2535 .ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
2536 .ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
2537 .ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
2538 .l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
2539 .dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
2540 .pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
2541 .pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
2542 .pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
2543 .pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
2544 .pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
2545 .vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
2546 .peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
2547 .bcnflt_stats_update_period =
2548 WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
2549 .pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
2550 .arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
2551 .dcs = WMI_TLV_PDEV_PARAM_DCS,
2552 .ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
2553 .ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
2554 .ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
2555 .ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
2556 .ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
2557 .dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
2558 .proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
2559 .idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
2560 .power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
2561 .fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
2562 .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
2563 .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
2564 .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
2565};
2566
/* Map generic ath10k vdev parameter IDs onto the TLV firmware's IDs.
 * Entries set to WMI_TLV_VDEV_PARAM_UNSUPPORTED (0) are rejected by the
 * vdev-set-param path for this firmware flavour.
 */
static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
	.rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_TLV_VDEV_PARAM_WDS,
	.atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_TLV_VDEV_PARAM_SGI,
	.ldpc = WMI_TLV_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_TLV_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
	/* Not provided by the TLV firmware ABI. */
	.mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_TLV_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
	/* Not provided by the TLV firmware ABI. */
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
					WMI_TLV_VDEV_PARAM_UNSUPPORTED,
};
2624
/* WMI op table for TLV firmware: event parsers (pull_*) and command
 * builders (gen_*). Ops left unset here fall back to the common WMI
 * layer's "not implemented" handling.
 */
static const struct wmi_ops wmi_tlv_ops = {
	.rx = ath10k_wmi_tlv_op_rx,
	.map_svc = wmi_tlv_svc_map,

	/* Event-parsing ops. */
	.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
	.pull_phyerr = ath10k_wmi_tlv_op_pull_phyerr_ev,
	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,

	/* Command-building ops. */
	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
	.gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
	.gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
	.gen_init = ath10k_wmi_tlv_op_gen_init,
	.gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
	.gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
	.gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
	.gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
	.gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
	.gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
	/* .gen_mgmt_tx = not implemented; HTT is used */
	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
	/* .gen_pdev_set_quiet_mode not implemented */
	/* .gen_pdev_get_temperature not implemented */
	/* .gen_addba_clear_resp not implemented */
	/* .gen_addba_send not implemented */
	/* .gen_addba_set_resp not implemented */
	/* .gen_delba_send not implemented */
	.gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
	.gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
	.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
	.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
	.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
};
2685
2686/************/
2687/* TLV init */
2688/************/
2689
2690void ath10k_wmi_tlv_attach(struct ath10k *ar)
2691{
2692 ar->wmi.cmd = &wmi_tlv_cmd_map;
2693 ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
2694 ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
2695 ar->wmi.ops = &wmi_tlv_ops;
2696}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
new file mode 100644
index 000000000000..de68fe76eae6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -0,0 +1,1444 @@
1/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
#ifndef _WMI_TLV_H
#define _WMI_TLV_H

/* Each TLV command/event group owns a 4096-wide ID range; the first real
 * ID inside a group is ((grp_id) << 12) | 0x1.
 */
#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
/* Sentinel (0) entries for commands/params the TLV ABI does not expose. */
#define WMI_TLV_CMD_UNSUPPORTED 0
#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
25
/* Command/event group numbers for the TLV firmware ABI. Each group value
 * is shifted into the upper bits of a command ID by WMI_TLV_CMD()/
 * WMI_TLV_EV(), so the order here defines the ABI — do not reorder.
 */
enum wmi_tlv_grp_id {
	WMI_TLV_GRP_START = 0x3,
	WMI_TLV_GRP_SCAN = WMI_TLV_GRP_START,
	WMI_TLV_GRP_PDEV,
	WMI_TLV_GRP_VDEV,
	WMI_TLV_GRP_PEER,
	WMI_TLV_GRP_MGMT,
	WMI_TLV_GRP_BA_NEG,
	WMI_TLV_GRP_STA_PS,
	WMI_TLV_GRP_DFS,
	WMI_TLV_GRP_ROAM,
	WMI_TLV_GRP_OFL_SCAN,
	WMI_TLV_GRP_P2P,
	WMI_TLV_GRP_AP_PS,
	WMI_TLV_GRP_RATECTL,
	WMI_TLV_GRP_PROFILE,
	WMI_TLV_GRP_SUSPEND,
	WMI_TLV_GRP_BCN_FILTER,
	WMI_TLV_GRP_WOW,
	WMI_TLV_GRP_RTT,
	WMI_TLV_GRP_SPECTRAL,
	WMI_TLV_GRP_STATS,
	WMI_TLV_GRP_ARP_NS_OFL,
	WMI_TLV_GRP_NLO_OFL,
	WMI_TLV_GRP_GTK_OFL,
	WMI_TLV_GRP_CSA_OFL,
	WMI_TLV_GRP_CHATTER,
	WMI_TLV_GRP_TID_ADDBA,
	WMI_TLV_GRP_MISC,
	WMI_TLV_GRP_GPIO,
	WMI_TLV_GRP_FWTEST,
	WMI_TLV_GRP_TDLS,
	WMI_TLV_GRP_RESMGR,
	WMI_TLV_GRP_STA_SMPS,
	WMI_TLV_GRP_WLAN_HB,
	WMI_TLV_GRP_RMC,
	WMI_TLV_GRP_MHF_OFL,
	WMI_TLV_GRP_LOCATION_SCAN,
	WMI_TLV_GRP_OEM,
	WMI_TLV_GRP_NAN,
	WMI_TLV_GRP_COEX,
	WMI_TLV_GRP_OBSS_OFL,
	WMI_TLV_GRP_LPI,
	WMI_TLV_GRP_EXTSCAN,
	WMI_TLV_GRP_DHCP_OFL,
	WMI_TLV_GRP_IPA,
	WMI_TLV_GRP_MDNS_OFL,
	WMI_TLV_GRP_SAP_OFL,
};
75
76enum wmi_tlv_cmd_id {
77 WMI_TLV_INIT_CMDID = 0x1,
78 WMI_TLV_START_SCAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SCAN),
79 WMI_TLV_STOP_SCAN_CMDID,
80 WMI_TLV_SCAN_CHAN_LIST_CMDID,
81 WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
82 WMI_TLV_SCAN_UPDATE_REQUEST_CMDID,
83 WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
84 WMI_TLV_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PDEV),
85 WMI_TLV_PDEV_SET_CHANNEL_CMDID,
86 WMI_TLV_PDEV_SET_PARAM_CMDID,
87 WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
88 WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
89 WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
90 WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
91 WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
92 WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
93 WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
94 WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
95 WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
96 WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
97 WMI_TLV_PDEV_DUMP_CMDID,
98 WMI_TLV_PDEV_SET_LED_CONFIG_CMDID,
99 WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
100 WMI_TLV_PDEV_SET_LED_FLASHING_CMDID,
101 WMI_TLV_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_VDEV),
102 WMI_TLV_VDEV_DELETE_CMDID,
103 WMI_TLV_VDEV_START_REQUEST_CMDID,
104 WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
105 WMI_TLV_VDEV_UP_CMDID,
106 WMI_TLV_VDEV_STOP_CMDID,
107 WMI_TLV_VDEV_DOWN_CMDID,
108 WMI_TLV_VDEV_SET_PARAM_CMDID,
109 WMI_TLV_VDEV_INSTALL_KEY_CMDID,
110 WMI_TLV_VDEV_WNM_SLEEPMODE_CMDID,
111 WMI_TLV_VDEV_WMM_ADDTS_CMDID,
112 WMI_TLV_VDEV_WMM_DELTS_CMDID,
113 WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
114 WMI_TLV_VDEV_SET_GTX_PARAMS_CMDID,
115 WMI_TLV_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
116 WMI_TLV_VDEV_PLMREQ_START_CMDID,
117 WMI_TLV_VDEV_PLMREQ_STOP_CMDID,
118 WMI_TLV_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PEER),
119 WMI_TLV_PEER_DELETE_CMDID,
120 WMI_TLV_PEER_FLUSH_TIDS_CMDID,
121 WMI_TLV_PEER_SET_PARAM_CMDID,
122 WMI_TLV_PEER_ASSOC_CMDID,
123 WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
124 WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
125 WMI_TLV_PEER_MCAST_GROUP_CMDID,
126 WMI_TLV_PEER_INFO_REQ_CMDID,
127 WMI_TLV_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
128 WMI_TLV_BCN_TX_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MGMT),
129 WMI_TLV_PDEV_SEND_BCN_CMDID,
130 WMI_TLV_BCN_TMPL_CMDID,
131 WMI_TLV_BCN_FILTER_RX_CMDID,
132 WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
133 WMI_TLV_MGMT_TX_CMDID,
134 WMI_TLV_PRB_TMPL_CMDID,
135 WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
136 WMI_TLV_ADDBA_SEND_CMDID,
137 WMI_TLV_ADDBA_STATUS_CMDID,
138 WMI_TLV_DELBA_SEND_CMDID,
139 WMI_TLV_ADDBA_SET_RESP_CMDID,
140 WMI_TLV_SEND_SINGLEAMSDU_CMDID,
141 WMI_TLV_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_PS),
142 WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
143 WMI_TLV_STA_MIMO_PS_MODE_CMDID,
144 WMI_TLV_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_DFS),
145 WMI_TLV_PDEV_DFS_DISABLE_CMDID,
146 WMI_TLV_DFS_PHYERR_FILTER_ENA_CMDID,
147 WMI_TLV_DFS_PHYERR_FILTER_DIS_CMDID,
148 WMI_TLV_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_TLV_GRP_ROAM),
149 WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
150 WMI_TLV_ROAM_SCAN_PERIOD,
151 WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
152 WMI_TLV_ROAM_AP_PROFILE,
153 WMI_TLV_ROAM_CHAN_LIST,
154 WMI_TLV_ROAM_SCAN_CMD,
155 WMI_TLV_ROAM_SYNCH_COMPLETE,
156 WMI_TLV_ROAM_SET_RIC_REQUEST_CMDID,
157 WMI_TLV_ROAM_INVOKE_CMDID,
158 WMI_TLV_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_TLV_GRP_OFL_SCAN),
159 WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
160 WMI_TLV_OFL_SCAN_PERIOD,
161 WMI_TLV_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_TLV_GRP_P2P),
162 WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
163 WMI_TLV_P2P_GO_SET_BEACON_IE,
164 WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
165 WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
166 WMI_TLV_P2P_DISC_OFFLOAD_CONFIG_CMDID,
167 WMI_TLV_P2P_DISC_OFFLOAD_APPIE_CMDID,
168 WMI_TLV_P2P_DISC_OFFLOAD_PATTERN_CMDID,
169 WMI_TLV_P2P_SET_OPPPS_PARAM_CMDID,
170 WMI_TLV_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_AP_PS),
171 WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
172 WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RATECTL),
173 WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PROFILE),
174 WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
175 WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
176 WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
177 WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
178 WMI_TLV_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SUSPEND),
179 WMI_TLV_PDEV_RESUME_CMDID,
180 WMI_TLV_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BCN_FILTER),
181 WMI_TLV_RMV_BCN_FILTER_CMDID,
182 WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WOW),
183 WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
184 WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
185 WMI_TLV_WOW_ENABLE_CMDID,
186 WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
187 WMI_TLV_WOW_ACER_IOAC_ADD_KEEPALIVE_CMDID,
188 WMI_TLV_WOW_ACER_IOAC_DEL_KEEPALIVE_CMDID,
189 WMI_TLV_WOW_ACER_IOAC_ADD_WAKE_PATTERN_CMDID,
190 WMI_TLV_WOW_ACER_IOAC_DEL_WAKE_PATTERN_CMDID,
191 WMI_TLV_D0_WOW_ENABLE_DISABLE_CMDID,
192 WMI_TLV_EXTWOW_ENABLE_CMDID,
193 WMI_TLV_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
194 WMI_TLV_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
195 WMI_TLV_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RTT),
196 WMI_TLV_RTT_TSF_CMDID,
197 WMI_TLV_SPECTRAL_SCAN_CONF_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SPECTRAL),
198 WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
199 WMI_TLV_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STATS),
200 WMI_TLV_MCC_SCHED_TRAFFIC_STATS_CMDID,
201 WMI_TLV_REQUEST_STATS_EXT_CMDID,
202 WMI_TLV_REQUEST_LINK_STATS_CMDID,
203 WMI_TLV_START_LINK_STATS_CMDID,
204 WMI_TLV_CLEAR_LINK_STATS_CMDID,
205 WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_ARP_NS_OFL),
206 WMI_TLV_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
207 WMI_TLV_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
208 WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID =
209 WMI_TLV_CMD(WMI_TLV_GRP_NLO_OFL),
210 WMI_TLV_APFIND_CMDID,
211 WMI_TLV_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GTK_OFL),
212 WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CSA_OFL),
213 WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
214 WMI_TLV_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CHATTER),
215 WMI_TLV_CHATTER_ADD_COALESCING_FILTER_CMDID,
216 WMI_TLV_CHATTER_DELETE_COALESCING_FILTER_CMDID,
217 WMI_TLV_CHATTER_COALESCING_QUERY_CMDID,
218 WMI_TLV_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TID_ADDBA),
219 WMI_TLV_PEER_TID_DELBA_CMDID,
220 WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
221 WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
222 WMI_TLV_STA_KEEPALIVE_CMDID,
223 WMI_TLV_BA_REQ_SSN_CMDID,
224 WMI_TLV_ECHO_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MISC),
225 WMI_TLV_PDEV_UTF_CMDID,
226 WMI_TLV_DBGLOG_CFG_CMDID,
227 WMI_TLV_PDEV_QVIT_CMDID,
228 WMI_TLV_PDEV_FTM_INTG_CMDID,
229 WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
230 WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
231 WMI_TLV_FORCE_FW_HANG_CMDID,
232 WMI_TLV_SET_MCASTBCAST_FILTER_CMDID,
233 WMI_TLV_THERMAL_MGMT_CMDID,
234 WMI_TLV_HOST_AUTO_SHUTDOWN_CFG_CMDID,
235 WMI_TLV_TPC_CHAINMASK_CONFIG_CMDID,
236 WMI_TLV_SET_ANTENNA_DIVERSITY_CMDID,
237 WMI_TLV_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GPIO),
238 WMI_TLV_GPIO_OUTPUT_CMDID,
239 WMI_TLV_TXBF_CMDID,
240 WMI_TLV_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID =
241 WMI_TLV_CMD(WMI_TLV_GRP_FWTEST),
242 WMI_TLV_FWTEST_P2P_SET_NOA_PARAM_CMDID,
243 WMI_TLV_UNIT_TEST_CMDID,
244 WMI_TLV_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TDLS),
245 WMI_TLV_TDLS_PEER_UPDATE_CMDID,
246 WMI_TLV_TDLS_SET_OFFCHAN_MODE_CMDID,
247 WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RESMGR),
248 WMI_TLV_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
249 WMI_TLV_RESMGR_SET_CHAN_LATENCY_CMDID,
250 WMI_TLV_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_SMPS),
251 WMI_TLV_STA_SMPS_PARAM_CMDID,
252 WMI_TLV_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WLAN_HB),
253 WMI_TLV_HB_SET_TCP_PARAMS_CMDID,
254 WMI_TLV_HB_SET_TCP_PKT_FILTER_CMDID,
255 WMI_TLV_HB_SET_UDP_PARAMS_CMDID,
256 WMI_TLV_HB_SET_UDP_PKT_FILTER_CMDID,
257 WMI_TLV_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RMC),
258 WMI_TLV_RMC_SET_ACTION_PERIOD_CMDID,
259 WMI_TLV_RMC_CONFIG_CMDID,
260 WMI_TLV_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MHF_OFL),
261 WMI_TLV_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
262 WMI_TLV_BATCH_SCAN_ENABLE_CMDID =
263 WMI_TLV_CMD(WMI_TLV_GRP_LOCATION_SCAN),
264 WMI_TLV_BATCH_SCAN_DISABLE_CMDID,
265 WMI_TLV_BATCH_SCAN_TRIGGER_RESULT_CMDID,
266 WMI_TLV_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OEM),
267 WMI_TLV_NAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_NAN),
268 WMI_TLV_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_COEX),
269 WMI_TLV_CHAN_AVOID_UPDATE_CMDID,
270 WMI_TLV_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OBSS_OFL),
271 WMI_TLV_OBSS_SCAN_DISABLE_CMDID,
272 WMI_TLV_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_LPI),
273 WMI_TLV_LPI_START_SCAN_CMDID,
274 WMI_TLV_LPI_STOP_SCAN_CMDID,
275 WMI_TLV_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_EXTSCAN),
276 WMI_TLV_EXTSCAN_STOP_CMDID,
277 WMI_TLV_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
278 WMI_TLV_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
279 WMI_TLV_EXTSCAN_GET_CACHED_RESULTS_CMDID,
280 WMI_TLV_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
281 WMI_TLV_EXTSCAN_SET_CAPABILITIES_CMDID,
282 WMI_TLV_EXTSCAN_GET_CAPABILITIES_CMDID,
283 WMI_TLV_SET_DHCP_SERVER_OFFLOAD_CMDID =
284 WMI_TLV_CMD(WMI_TLV_GRP_DHCP_OFL),
285 WMI_TLV_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_IPA),
286 WMI_TLV_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MDNS_OFL),
287 WMI_TLV_MDNS_SET_FQDN_CMDID,
288 WMI_TLV_MDNS_SET_RESPONSE_CMDID,
289 WMI_TLV_MDNS_GET_STATS_CMDID,
290 WMI_TLV_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SAP_OFL),
291};
292
/* Event IDs delivered by the TLV firmware to the host.
 *
 * IDs are partitioned into functional groups: the first event of each group
 * is computed with WMI_TLV_EV(group) and the following members take
 * consecutive values.  The numbering is firmware ABI — never reorder members
 * or insert new ones in the middle of a group; append at group ends only,
 * mirroring the firmware-side definitions.
 */
enum wmi_tlv_event_id {
	WMI_TLV_SERVICE_READY_EVENTID = 0x1,
	WMI_TLV_READY_EVENTID,
	WMI_TLV_SCAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SCAN),
	WMI_TLV_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PDEV),
	WMI_TLV_CHAN_INFO_EVENTID,
	WMI_TLV_PHYERR_EVENTID,
	WMI_TLV_PDEV_DUMP_EVENTID,
	WMI_TLV_TX_PAUSE_EVENTID,
	WMI_TLV_DFS_RADAR_EVENTID,
	WMI_TLV_PDEV_L1SS_TRACK_EVENTID,
	WMI_TLV_PDEV_TEMPERATURE_EVENTID,
	WMI_TLV_VDEV_START_RESP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_VDEV),
	WMI_TLV_VDEV_STOPPED_EVENTID,
	WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
	WMI_TLV_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
	WMI_TLV_PEER_STA_KICKOUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PEER),
	WMI_TLV_PEER_INFO_EVENTID,
	WMI_TLV_PEER_TX_FAIL_CNT_THR_EVENTID,
	WMI_TLV_PEER_ESTIMATED_LINKSPEED_EVENTID,
	WMI_TLV_PEER_STATE_EVENTID,
	WMI_TLV_MGMT_RX_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MGMT),
	WMI_TLV_HOST_SWBA_EVENTID,
	WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
	WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
	WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
	WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
	WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
	WMI_TLV_BA_RSP_SSN_EVENTID,
	WMI_TLV_AGGR_STATE_TRIG_EVENTID,
	WMI_TLV_ROAM_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_ROAM),
	/* NOTE: no _EVENTID suffix in firmware headers; kept as-is for ABI
	 * name parity.
	 */
	WMI_TLV_PROFILE_MATCH,
	WMI_TLV_ROAM_SYNCH_EVENTID,
	WMI_TLV_P2P_DISC_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_P2P),
	WMI_TLV_P2P_NOA_EVENTID,
	WMI_TLV_PDEV_RESUME_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SUSPEND),
	WMI_TLV_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_WOW),
	WMI_TLV_D0_WOW_DISABLE_ACK_EVENTID,
	WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_RTT),
	WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID,
	WMI_TLV_RTT_ERROR_REPORT_EVENTID,
	WMI_TLV_STATS_EXT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_STATS),
	WMI_TLV_IFACE_LINK_STATS_EVENTID,
	WMI_TLV_PEER_LINK_STATS_EVENTID,
	WMI_TLV_RADIO_LINK_STATS_EVENTID,
	WMI_TLV_NLO_MATCH_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NLO_OFL),
	WMI_TLV_NLO_SCAN_COMPLETE_EVENTID,
	WMI_TLV_APFIND_EVENTID,
	WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GTK_OFL),
	WMI_TLV_GTK_REKEY_FAIL_EVENTID,
	WMI_TLV_CSA_HANDLING_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CSA_OFL),
	WMI_TLV_CHATTER_PC_QUERY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CHATTER),
	WMI_TLV_ECHO_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MISC),
	WMI_TLV_PDEV_UTF_EVENTID,
	WMI_TLV_DEBUG_MESG_EVENTID,
	WMI_TLV_UPDATE_STATS_EVENTID,
	WMI_TLV_DEBUG_PRINT_EVENTID,
	WMI_TLV_DCS_INTERFERENCE_EVENTID,
	WMI_TLV_PDEV_QVIT_EVENTID,
	WMI_TLV_WLAN_PROFILE_DATA_EVENTID,
	WMI_TLV_PDEV_FTM_INTG_EVENTID,
	WMI_TLV_WLAN_FREQ_AVOID_EVENTID,
	WMI_TLV_VDEV_GET_KEEPALIVE_EVENTID,
	WMI_TLV_THERMAL_MGMT_EVENTID,
	WMI_TLV_DIAG_DATA_CONTAINER_EVENTID,
	WMI_TLV_HOST_AUTO_SHUTDOWN_EVENTID,
	WMI_TLV_UPDATE_WHAL_MIB_STATS_EVENTID,
	WMI_TLV_UPDATE_VDEV_RATE_STATS_EVENTID,
	WMI_TLV_DIAG_EVENTID,
	WMI_TLV_GPIO_INPUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GPIO),
	WMI_TLV_UPLOADH_EVENTID,
	WMI_TLV_CAPTUREH_EVENTID,
	WMI_TLV_RFKILL_STATE_CHANGE_EVENTID,
	WMI_TLV_TDLS_PEER_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_TDLS),
	WMI_TLV_BATCH_SCAN_ENABLED_EVENTID =
			WMI_TLV_EV(WMI_TLV_GRP_LOCATION_SCAN),
	WMI_TLV_BATCH_SCAN_RESULT_EVENTID,
	WMI_TLV_OEM_CAPABILITY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_OEM),
	WMI_TLV_OEM_MEASUREMENT_REPORT_EVENTID,
	WMI_TLV_OEM_ERROR_REPORT_EVENTID,
	WMI_TLV_NAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NAN),
	WMI_TLV_LPI_RESULT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_LPI),
	WMI_TLV_LPI_STATUS_EVENTID,
	WMI_TLV_LPI_HANDOFF_EVENTID,
	WMI_TLV_EXTSCAN_START_STOP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_EXTSCAN),
	WMI_TLV_EXTSCAN_OPERATION_EVENTID,
	WMI_TLV_EXTSCAN_TABLE_USAGE_EVENTID,
	WMI_TLV_EXTSCAN_CACHED_RESULTS_EVENTID,
	WMI_TLV_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
	WMI_TLV_EXTSCAN_HOTLIST_MATCH_EVENTID,
	WMI_TLV_EXTSCAN_CAPABILITIES_EVENTID,
	WMI_TLV_MDNS_STATS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MDNS_OFL),
	WMI_TLV_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SAP_OFL),
	WMI_TLV_SAP_OFL_DEL_STA_EVENTID,
};
388
/* Per-pdev (physical device/radio) tunables settable via
 * WMI_TLV_PDEV_SET_PARAM_CMDID.  Values start at 0x1 and are assigned
 * consecutively by declaration order; this numbering is firmware ABI, so
 * members must not be reordered or removed — append only.
 */
enum wmi_tlv_pdev_param {
	WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
	WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
	WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
	WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
	WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
	WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
	WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
	WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
	WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
	WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
	WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
	WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	WMI_TLV_PDEV_PARAM_LTR_ENABLE,
	WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
	WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
	WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
	WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
	WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
	WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
	WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
	WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
	WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	WMI_TLV_PDEV_PARAM_PMF_QOS,
	WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
	WMI_TLV_PDEV_PARAM_DCS,
	WMI_TLV_PDEV_PARAM_ANI_ENABLE,
	WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
	WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
	WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
	WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
	WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
	WMI_TLV_PDEV_PARAM_PROXY_STA,
	WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
	WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
	WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
	WMI_TLV_PDEV_PARAM_BURST_DUR,
	WMI_TLV_PDEV_PARAM_BURST_ENABLE,
	WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
	WMI_TLV_PDEV_PARAM_LOW_POWER_RF_ENABLE,
	WMI_TLV_PDEV_PARAM_L1SS_TRACK,
	WMI_TLV_PDEV_PARAM_HYST_EN,
	WMI_TLV_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
	WMI_TLV_PDEV_PARAM_LED_SYS_STATE,
	WMI_TLV_PDEV_PARAM_LED_ENABLE,
	WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
	WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
	WMI_TLV_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
	WMI_TLV_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
	WMI_TLV_PDEV_PARAM_TXPOWER_REASON_NONE,
	WMI_TLV_PDEV_PARAM_TXPOWER_REASON_SAR,
	WMI_TLV_PDEV_PARAM_TXPOWER_REASON_MAX,
};
453
/* Per-vdev (virtual interface) tunables settable via
 * WMI_TLV_VDEV_SET_PARAM_CMDID.  Values start at 0x1 and follow declaration
 * order; firmware ABI — append only, never reorder.  A few members lack the
 * _PARAM_ infix (e.g. WMI_TLV_VDEV_STATS_UPDATE_PERIOD); the irregular names
 * match the firmware headers and are intentional.
 */
enum wmi_tlv_vdev_param {
	WMI_TLV_VDEV_PARAM_RTS_THRESHOLD = 0x1,
	WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
	WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
	WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
	WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
	WMI_TLV_VDEV_PARAM_SLOT_TIME,
	WMI_TLV_VDEV_PARAM_PREAMBLE,
	WMI_TLV_VDEV_PARAM_SWBA_TIME,
	WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
	WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
	WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
	WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
	WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	WMI_TLV_VDEV_PARAM_WDS,
	WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
	WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
	WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
	WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
	WMI_TLV_VDEV_PARAM_FEATURE_WMM,
	WMI_TLV_VDEV_PARAM_CHWIDTH,
	WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
	WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
	WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
	WMI_TLV_VDEV_PARAM_MGMT_RATE,
	WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
	WMI_TLV_VDEV_PARAM_FIXED_RATE,
	WMI_TLV_VDEV_PARAM_SGI,
	WMI_TLV_VDEV_PARAM_LDPC,
	WMI_TLV_VDEV_PARAM_TX_STBC,
	WMI_TLV_VDEV_PARAM_RX_STBC,
	WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
	WMI_TLV_VDEV_PARAM_DEF_KEYID,
	WMI_TLV_VDEV_PARAM_NSS,
	WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
	WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
	WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
	WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
	WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
	WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
	WMI_TLV_VDEV_PARAM_TXBF,
	WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
	WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
	WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
	WMI_TLV_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
	WMI_TLV_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
	WMI_TLV_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
	WMI_TLV_VDEV_PARAM_EARLY_RX_SLOP_STEP,
	WMI_TLV_VDEV_PARAM_EARLY_RX_INIT_SLOP,
	WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
	WMI_TLV_VDEV_PARAM_TX_PWRLIMIT,
	WMI_TLV_VDEV_PARAM_SNR_NUM_FOR_CAL,
	WMI_TLV_VDEV_PARAM_ROAM_FW_OFFLOAD,
	WMI_TLV_VDEV_PARAM_ENABLE_RMC,
	WMI_TLV_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
	WMI_TLV_VDEV_PARAM_MAX_RATE,
	WMI_TLV_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
	WMI_TLV_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
	WMI_TLV_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
	WMI_TLV_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
	WMI_TLV_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
	WMI_TLV_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
	WMI_TLV_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
	WMI_TLV_VDEV_PARAM_INACTIVITY_CNT,
	WMI_TLV_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
	WMI_TLV_VDEV_PARAM_DTIM_POLICY,
	WMI_TLV_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
	WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
};
529
/* TLV tag values identifying the type of each TLV element in WMI messages.
 *
 * Tags 0..15 are reserved; generic array tags occupy 16..31; structure tags
 * start at 32 and are assigned consecutively by declaration order.  Every
 * value is firmware ABI: do not reorder or insert entries — append new
 * structure tags immediately before WMI_TLV_TAG_MAX only.
 */
enum wmi_tlv_tag {
	WMI_TLV_TAG_LAST_RESERVED = 15,

	/* Generic array tags (payload is an array of the named element). */
	WMI_TLV_TAG_FIRST_ARRAY_ENUM,
	WMI_TLV_TAG_ARRAY_UINT32 = WMI_TLV_TAG_FIRST_ARRAY_ENUM,
	WMI_TLV_TAG_ARRAY_BYTE,
	WMI_TLV_TAG_ARRAY_STRUCT,
	WMI_TLV_TAG_ARRAY_FIXED_STRUCT,
	WMI_TLV_TAG_LAST_ARRAY_ENUM = 31,

	/* Structure tags; first entry takes value 32. */
	WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT,
	WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES,
	WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ,
	WMI_TLV_TAG_STRUCT_READY_EVENT,
	WMI_TLV_TAG_STRUCT_SCAN_EVENT,
	WMI_TLV_TAG_STRUCT_PDEV_TPC_CONFIG_EVENT,
	WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT,
	WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR,
	WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT,
	WMI_TLV_TAG_STRUCT_VDEV_STOPPED_EVENT,
	WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_COMPLETE_EVENT,
	WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT,
	WMI_TLV_TAG_STRUCT_MGMT_RX_HDR,
	WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EVENT,
	WMI_TLV_TAG_STRUCT_TX_DELBA_COMPLETE_EVENT,
	WMI_TLV_TAG_STRUCT_TX_ADDBA_COMPLETE_EVENT,
	WMI_TLV_TAG_STRUCT_ROAM_EVENT,
	WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO,
	WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO_SECTION_BITMAP,
	WMI_TLV_TAG_STRUCT_RTT_EVENT_HEADER,
	WMI_TLV_TAG_STRUCT_RTT_ERROR_REPORT_EVENT,
	WMI_TLV_TAG_STRUCT_RTT_MEAS_EVENT,
	WMI_TLV_TAG_STRUCT_ECHO_EVENT,
	WMI_TLV_TAG_STRUCT_FTM_INTG_EVENT,
	WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_EVENT,
	WMI_TLV_TAG_STRUCT_GPIO_INPUT_EVENT,
	WMI_TLV_TAG_STRUCT_CSA_EVENT,
	WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_STATUS_EVENT,
	WMI_TLV_TAG_STRUCT_IGTK_INFO,
	WMI_TLV_TAG_STRUCT_DCS_INTERFERENCE_EVENT,
	WMI_TLV_TAG_STRUCT_ATH_DCS_CW_INT,
	WMI_TLV_TAG_STRUCT_ATH_DCS_WLAN_INT_STAT,
	WMI_TLV_TAG_STRUCT_WLAN_PROFILE_CTX_T,
	WMI_TLV_TAG_STRUCT_WLAN_PROFILE_T,
	WMI_TLV_TAG_STRUCT_PDEV_QVIT_EVENT,
	WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT,
	WMI_TLV_TAG_STRUCT_TIM_INFO,
	WMI_TLV_TAG_STRUCT_P2P_NOA_INFO,
	WMI_TLV_TAG_STRUCT_STATS_EVENT,
	WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGES_EVENT,
	WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGE_DESC,
	WMI_TLV_TAG_STRUCT_GTK_REKEY_FAIL_EVENT,
	WMI_TLV_TAG_STRUCT_INIT_CMD,
	WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG,
	WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK,
	WMI_TLV_TAG_STRUCT_START_SCAN_CMD,
	WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD,
	WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD,
	WMI_TLV_TAG_STRUCT_CHANNEL,
	WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD,
	WMI_TLV_TAG_STRUCT_WMM_PARAMS,
	WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD,
	WMI_TLV_TAG_STRUCT_P2P_NOA_DESCRIPTOR,
	WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE,
	WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_UP_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD,
	WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD,
	WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD,
	WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD,
	WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD,
	WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD,
	WMI_TLV_TAG_STRUCT_VHT_RATE_SET,
	WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD,
	WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD,
	WMI_TLV_TAG_STRUCT_BCN_PRB_INFO,
	WMI_TLV_TAG_STRUCT_PEER_TID_ADDBA_CMD,
	WMI_TLV_TAG_STRUCT_PEER_TID_DELBA_CMD,
	WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD,
	WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD,
	WMI_TLV_TAG_STRUCT_STA_DTIM_PS_METHOD_CMD,
	WMI_TLV_TAG_STRUCT_ROAM_SCAN_MODE,
	WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_THRESHOLD,
	WMI_TLV_TAG_STRUCT_ROAM_SCAN_PERIOD,
	WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD,
	WMI_TLV_TAG_STRUCT_ADD_BCN_FILTER_CMD,
	WMI_TLV_TAG_STRUCT_RMV_BCN_FILTER_CMD,
	WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD,
	WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
	WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD,
	WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM,
	WMI_TLV_TAG_STRUCT_SET_ARP_NS_OFFLOAD_CMD,
	WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE,
	WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE,
	WMI_TLV_TAG_STRUCT_FTM_INTG_CMD,
	WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD,
	/* "KEEPALVE" typo matches the firmware header name; keep as-is. */
	WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE,
	WMI_TLV_TAG_STRUCT_P2P_SET_VENDOR_IE_DATA_CMD,
	WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD,
	WMI_TLV_TAG_STRUCT_PEER_RATE_RETRY_SCHED_CMD,
	WMI_TLV_TAG_STRUCT_WLAN_PROFILE_TRIGGER_CMD,
	WMI_TLV_TAG_STRUCT_WLAN_PROFILE_SET_HIST_INTVL_CMD,
	WMI_TLV_TAG_STRUCT_WLAN_PROFILE_GET_PROF_DATA_CMD,
	WMI_TLV_TAG_STRUCT_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
	WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD,
	WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD,
	WMI_TLV_TAG_STRUCT_RTT_MEASREQ_HEAD,
	WMI_TLV_TAG_STRUCT_RTT_MEASREQ_BODY,
	WMI_TLV_TAG_STRUCT_RTT_TSF_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD,
	WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD,
	WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD,
	WMI_TLV_TAG_STRUCT_NLO_CONFIGURED_PARAMETERS,
	WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_ENABLE_CMD,
	WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_CHANSWITCH_CMD,
	WMI_TLV_TAG_STRUCT_CHATTER_SET_MODE_CMD,
	WMI_TLV_TAG_STRUCT_ECHO_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_SET_KEEPALIVE_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_CMD,
	WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD,
	WMI_TLV_TAG_STRUCT_GPIO_CONFIG_CMD,
	WMI_TLV_TAG_STRUCT_GPIO_OUTPUT_CMD,
	WMI_TLV_TAG_STRUCT_PEER_ADD_WDS_ENTRY_CMD,
	WMI_TLV_TAG_STRUCT_PEER_REMOVE_WDS_ENTRY_CMD,
	WMI_TLV_TAG_STRUCT_BCN_TX_HDR,
	WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD,
	WMI_TLV_TAG_STRUCT_MGMT_TX_HDR,
	WMI_TLV_TAG_STRUCT_ADDBA_CLEAR_RESP_CMD,
	WMI_TLV_TAG_STRUCT_ADDBA_SEND_CMD,
	WMI_TLV_TAG_STRUCT_DELBA_SEND_CMD,
	WMI_TLV_TAG_STRUCT_ADDBA_SETRESPONSE_CMD,
	WMI_TLV_TAG_STRUCT_SEND_SINGLEAMSDU_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_SET_HT_IE_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_SET_VHT_IE_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_SET_DSCP_TID_MAP_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_GREEN_AP_PS_ENABLE_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CONFIG_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_SET_BASE_MACADDR_CMD,
	WMI_TLV_TAG_STRUCT_PEER_MCAST_GROUP_CMD,
	WMI_TLV_TAG_STRUCT_ROAM_AP_PROFILE,
	WMI_TLV_TAG_STRUCT_AP_PROFILE,
	WMI_TLV_TAG_STRUCT_SCAN_SCH_PRIORITY_TABLE_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_DFS_ENABLE_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_DFS_DISABLE_CMD,
	WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD,
	WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T,
	WMI_TLV_TAG_STRUCT_WOW_IPV4_SYNC_PATTERN_T,
	WMI_TLV_TAG_STRUCT_WOW_IPV6_SYNC_PATTERN_T,
	WMI_TLV_TAG_STRUCT_WOW_MAGIC_PATTERN_CMD,
	WMI_TLV_TAG_STRUCT_SCAN_UPDATE_REQUEST_CMD,
	WMI_TLV_TAG_STRUCT_CHATTER_PKT_COALESCING_FILTER,
	WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_ADD_FILTER_CMD,
	WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_DELETE_FILTER_CMD,
	WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_QUERY_CMD,
	WMI_TLV_TAG_STRUCT_TXBF_CMD,
	WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD,
	WMI_TLV_TAG_STRUCT_NLO_EVENT,
	WMI_TLV_TAG_STRUCT_CHATTER_QUERY_REPLY_EVENT,
	WMI_TLV_TAG_STRUCT_UPLOAD_H_HDR,
	WMI_TLV_TAG_STRUCT_CAPTURE_H_EVENT_HDR,
	WMI_TLV_TAG_STRUCT_VDEV_WNM_SLEEPMODE_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_WMM_ADDTS_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_WMM_DELTS_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD,
	WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD,
	WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD,
	WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT,
	WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES,
	WMI_TLV_TAG_STRUCT_VDEV_MCC_SET_TBTT_MODE_CMD,
	WMI_TLV_TAG_STRUCT_ROAM_CHAN_LIST,
	WMI_TLV_TAG_STRUCT_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
	WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD,
	WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
	WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_LATENCY_CMD,
	WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD,
	WMI_TLV_TAG_STRUCT_BA_RSP_SSN_EVENT,
	WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_CMD,
	WMI_TLV_TAG_STRUCT_SET_MCASTBCAST_FILTER_CMD,
	WMI_TLV_TAG_STRUCT_P2P_SET_OPPPS_CMD,
	WMI_TLV_TAG_STRUCT_P2P_SET_NOA_CMD,
	WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
	WMI_TLV_TAG_STRUCT_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
	WMI_TLV_TAG_STRUCT_STA_SMPS_PARAM_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_SET_GTX_PARAMS_CMD,
	WMI_TLV_TAG_STRUCT_MCC_SCHED_TRAFFIC_STATS_CMD,
	WMI_TLV_TAG_STRUCT_MCC_SCHED_STA_TRAFFIC_STATS,
	WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT,
	WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT,
	WMI_TLV_TAG_STRUCT_HB_SET_ENABLE_CMD,
	WMI_TLV_TAG_STRUCT_HB_SET_TCP_PARAMS_CMD,
	WMI_TLV_TAG_STRUCT_HB_SET_TCP_PKT_FILTER_CMD,
	WMI_TLV_TAG_STRUCT_HB_SET_UDP_PARAMS_CMD,
	WMI_TLV_TAG_STRUCT_HB_SET_UDP_PKT_FILTER_CMD,
	WMI_TLV_TAG_STRUCT_HB_IND_EVENT,
	WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT,
	WMI_TLV_TAG_STRUCT_RFKILL_EVENT,
	WMI_TLV_TAG_STRUCT_DFS_RADAR_EVENT,
	WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_ENA_CMD,
	WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_DIS_CMD,
	WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_SCAN_LIST,
	WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_NETWORK_INFO,
	WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLE_CMD,
	WMI_TLV_TAG_STRUCT_BATCH_SCAN_DISABLE_CMD,
	WMI_TLV_TAG_STRUCT_BATCH_SCAN_TRIGGER_RESULT_CMD,
	WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLED_EVENT,
	WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_EVENT,
	WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_START_CMD,
	WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_STOP_CMD,
	WMI_TLV_TAG_STRUCT_THERMAL_MGMT_CMD,
	WMI_TLV_TAG_STRUCT_THERMAL_MGMT_EVENT,
	WMI_TLV_TAG_STRUCT_PEER_INFO_REQ_CMD,
	WMI_TLV_TAG_STRUCT_PEER_INFO_EVENT,
	WMI_TLV_TAG_STRUCT_PEER_INFO,
	WMI_TLV_TAG_STRUCT_PEER_TX_FAIL_CNT_THR_EVENT,
	WMI_TLV_TAG_STRUCT_RMC_SET_MODE_CMD,
	WMI_TLV_TAG_STRUCT_RMC_SET_ACTION_PERIOD_CMD,
	WMI_TLV_TAG_STRUCT_RMC_CONFIG_CMD,
	WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_SET_MODE_CMD,
	WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
	WMI_TLV_TAG_STRUCT_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
	WMI_TLV_TAG_STRUCT_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
	WMI_TLV_TAG_STRUCT_NAN_CMD_PARAM,
	WMI_TLV_TAG_STRUCT_NAN_EVENT_HDR,
	WMI_TLV_TAG_STRUCT_PDEV_L1SS_TRACK_EVENT,
	WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT,
	WMI_TLV_TAG_STRUCT_MODEM_POWER_STATE_CMD_PARAM,
	WMI_TLV_TAG_STRUCT_PEER_GET_ESTIMATED_LINKSPEED_CMD,
	WMI_TLV_TAG_STRUCT_PEER_ESTIMATED_LINKSPEED_EVENT,
	WMI_TLV_TAG_STRUCT_AGGR_STATE_TRIG_EVENT,
	WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
	WMI_TLV_TAG_STRUCT_ROAM_SCAN_CMD,
	WMI_TLV_TAG_STRUCT_REQ_STATS_EXT_CMD,
	WMI_TLV_TAG_STRUCT_STATS_EXT_EVENT,
	WMI_TLV_TAG_STRUCT_OBSS_SCAN_ENABLE_CMD,
	WMI_TLV_TAG_STRUCT_OBSS_SCAN_DISABLE_CMD,
	WMI_TLV_TAG_STRUCT_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
	WMI_TLV_TAG_STRUCT_PDEV_SET_LED_CONFIG_CMD,
	WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_CFG_CMD,
	WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_EVENT,
	WMI_TLV_TAG_STRUCT_UPDATE_WHAL_MIB_STATS_EVENT,
	WMI_TLV_TAG_STRUCT_CHAN_AVOID_UPDATE_CMD_PARAM,
	WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_PKT_PATTERN_T,
	WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_TMR_PATTERN_T,
	WMI_TLV_TAG_STRUCT_WOW_IOAC_ADD_KEEPALIVE_CMD,
	WMI_TLV_TAG_STRUCT_WOW_IOAC_DEL_KEEPALIVE_CMD,
	WMI_TLV_TAG_STRUCT_WOW_IOAC_KEEPALIVE_T,
	WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_ADD_PATTERN_CMD,
	WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_DEL_PATTERN_CMD,
	WMI_TLV_TAG_STRUCT_START_LINK_STATS_CMD,
	WMI_TLV_TAG_STRUCT_CLEAR_LINK_STATS_CMD,
	WMI_TLV_TAG_STRUCT_REQUEST_LINK_STATS_CMD,
	WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS_EVENT,
	WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS_EVENT,
	WMI_TLV_TAG_STRUCT_PEER_STATS_EVENT,
	WMI_TLV_TAG_STRUCT_CHANNEL_STATS,
	WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS,
	WMI_TLV_TAG_STRUCT_RATE_STATS,
	WMI_TLV_TAG_STRUCT_PEER_LINK_STATS,
	WMI_TLV_TAG_STRUCT_WMM_AC_STATS,
	WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS,
	WMI_TLV_TAG_STRUCT_LPI_MGMT_SNOOPING_CONFIG_CMD,
	WMI_TLV_TAG_STRUCT_LPI_START_SCAN_CMD,
	WMI_TLV_TAG_STRUCT_LPI_STOP_SCAN_CMD,
	WMI_TLV_TAG_STRUCT_LPI_RESULT_EVENT,
	WMI_TLV_TAG_STRUCT_PEER_STATE_EVENT,
	WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CMD,
	WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CHANNEL_EVENT,
	WMI_TLV_TAG_STRUCT_EXTSCAN_START_CMD,
	WMI_TLV_TAG_STRUCT_EXTSCAN_STOP_CMD,
	WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
	WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
	WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
	WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CACHED_RESULTS_CMD,
	WMI_TLV_TAG_STRUCT_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
	WMI_TLV_TAG_STRUCT_EXTSCAN_SET_CAPABILITIES_CMD,
	WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CAPABILITIES_CMD,
	WMI_TLV_TAG_STRUCT_EXTSCAN_OPERATION_EVENT,
	WMI_TLV_TAG_STRUCT_EXTSCAN_START_STOP_EVENT,
	WMI_TLV_TAG_STRUCT_EXTSCAN_TABLE_USAGE_EVENT,
	WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
	WMI_TLV_TAG_STRUCT_EXTSCAN_RSSI_INFO_EVENT,
	WMI_TLV_TAG_STRUCT_EXTSCAN_CACHED_RESULTS_EVENT,
	WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
	WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
	WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MATCH_EVENT,
	WMI_TLV_TAG_STRUCT_EXTSCAN_CAPABILITIES_EVENT,
	WMI_TLV_TAG_STRUCT_EXTSCAN_CACHE_CAPABILITIES_EVENT,
	WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
	WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
	WMI_TLV_TAG_STRUCT_D0_WOW_ENABLE_DISABLE_CMD,
	WMI_TLV_TAG_STRUCT_D0_WOW_DISABLE_ACK_EVENT,
	WMI_TLV_TAG_STRUCT_UNIT_TEST_CMD,
	WMI_TLV_TAG_STRUCT_ROAM_OFFLOAD_TLV_PARAM,
	WMI_TLV_TAG_STRUCT_ROAM_11I_OFFLOAD_TLV_PARAM,
	WMI_TLV_TAG_STRUCT_ROAM_11R_OFFLOAD_TLV_PARAM,
	WMI_TLV_TAG_STRUCT_ROAM_ESE_OFFLOAD_TLV_PARAM,
	WMI_TLV_TAG_STRUCT_ROAM_SYNCH_EVENT,
	WMI_TLV_TAG_STRUCT_ROAM_SYNCH_COMPLETE,
	WMI_TLV_TAG_STRUCT_EXTWOW_ENABLE_CMD,
	WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
	WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
	WMI_TLV_TAG_STRUCT_LPI_STATUS_EVENT,
	WMI_TLV_TAG_STRUCT_LPI_HANDOFF_EVENT,
	WMI_TLV_TAG_STRUCT_VDEV_RATE_STATS_EVENT,
	WMI_TLV_TAG_STRUCT_VDEV_RATE_HT_INFO,
	WMI_TLV_TAG_STRUCT_RIC_REQUEST,
	WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_TEMPERATURE_EVENT,
	WMI_TLV_TAG_STRUCT_SET_DHCP_SERVER_OFFLOAD_CMD,
	WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG_CMD,
	WMI_TLV_TAG_STRUCT_RIC_TSPEC,
	WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG,
	WMI_TLV_TAG_STRUCT_IPA_OFFLOAD_CMD,
	WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD,
	WMI_TLV_TAG_STRUCT_KEY_MATERIAL,
	WMI_TLV_TAG_STRUCT_TDLS_SET_OFFCHAN_MODE_CMD,
	WMI_TLV_TAG_STRUCT_SET_LED_FLASHING_CMD,
	WMI_TLV_TAG_STRUCT_MDNS_OFFLOAD_CMD,
	WMI_TLV_TAG_STRUCT_MDNS_SET_FQDN_CMD,
	WMI_TLV_TAG_STRUCT_MDNS_SET_RESP_CMD,
	WMI_TLV_TAG_STRUCT_MDNS_GET_STATS_CMD,
	WMI_TLV_TAG_STRUCT_MDNS_STATS_EVENT,
	WMI_TLV_TAG_STRUCT_ROAM_INVOKE_CMD,
	WMI_TLV_TAG_STRUCT_PDEV_RESUME_EVENT,
	WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_DIVERSITY_CMD,
	WMI_TLV_TAG_STRUCT_SAP_OFL_ENABLE_CMD,
	WMI_TLV_TAG_STRUCT_SAP_OFL_ADD_STA_EVENT,
	WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT,
	WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM,
	WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,

	WMI_TLV_TAG_MAX
};
877
/* Firmware capability ("service") bit indices advertised in the service
 * bitmap of the SERVICE_READY event.  Each value is the service's bit
 * position as tested by WMI_SERVICE_IS_ENABLED(); wmi_tlv_svc_map() below
 * translates these into the driver-generic WMI_SERVICE_* bits.  Bit
 * positions are firmware ABI — append only, never reorder.
 */
enum wmi_tlv_service {
	WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
	WMI_TLV_SERVICE_SCAN_OFFLOAD,
	WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
	WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
	WMI_TLV_SERVICE_STA_PWRSAVE,
	WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
	WMI_TLV_SERVICE_AP_UAPSD,
	WMI_TLV_SERVICE_AP_DFS,
	WMI_TLV_SERVICE_11AC,
	WMI_TLV_SERVICE_BLOCKACK,
	WMI_TLV_SERVICE_PHYERR,
	WMI_TLV_SERVICE_BCN_FILTER,
	WMI_TLV_SERVICE_RTT,
	WMI_TLV_SERVICE_WOW,
	WMI_TLV_SERVICE_RATECTRL_CACHE,
	WMI_TLV_SERVICE_IRAM_TIDS,
	WMI_TLV_SERVICE_ARPNS_OFFLOAD,
	WMI_TLV_SERVICE_NLO,
	WMI_TLV_SERVICE_GTK_OFFLOAD,
	WMI_TLV_SERVICE_SCAN_SCH,
	WMI_TLV_SERVICE_CSA_OFFLOAD,
	WMI_TLV_SERVICE_CHATTER,
	WMI_TLV_SERVICE_COEX_FREQAVOID,
	WMI_TLV_SERVICE_PACKET_POWER_SAVE,
	WMI_TLV_SERVICE_FORCE_FW_HANG,
	WMI_TLV_SERVICE_GPIO,
	WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
	WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
	WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
	WMI_TLV_SERVICE_STA_KEEP_ALIVE,
	WMI_TLV_SERVICE_TX_ENCAP,
	WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
	WMI_TLV_SERVICE_EARLY_RX,
	WMI_TLV_SERVICE_STA_SMPS,
	WMI_TLV_SERVICE_FWTEST,
	WMI_TLV_SERVICE_STA_WMMAC,
	WMI_TLV_SERVICE_TDLS,
	WMI_TLV_SERVICE_BURST,
	WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
	WMI_TLV_SERVICE_ADAPTIVE_OCS,
	WMI_TLV_SERVICE_BA_SSN_SUPPORT,
	WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
	WMI_TLV_SERVICE_WLAN_HB,
	WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
	WMI_TLV_SERVICE_BATCH_SCAN,
	WMI_TLV_SERVICE_QPOWER,
	WMI_TLV_SERVICE_PLMREQ,
	WMI_TLV_SERVICE_THERMAL_MGMT,
	WMI_TLV_SERVICE_RMC,
	WMI_TLV_SERVICE_MHF_OFFLOAD,
	WMI_TLV_SERVICE_COEX_SAR,
	WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
	WMI_TLV_SERVICE_NAN,
	WMI_TLV_SERVICE_L1SS_STAT,
	WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
	WMI_TLV_SERVICE_OBSS_SCAN,
	WMI_TLV_SERVICE_TDLS_OFFCHAN,
	WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
	WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
	WMI_TLV_SERVICE_IBSS_PWRSAVE,
	WMI_TLV_SERVICE_LPASS,
	WMI_TLV_SERVICE_EXTSCAN,
	WMI_TLV_SERVICE_D0WOW,
	WMI_TLV_SERVICE_HSOFFLOAD,
	WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
	WMI_TLV_SERVICE_RX_FULL_REORDER,
	WMI_TLV_SERVICE_DHCP_OFFLOAD,
	WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
	WMI_TLV_SERVICE_MDNS_OFFLOAD,
	WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
};
950
/* Test whether service bit @svc_id is set in the firmware-provided service
 * bitmap @wmi_svc_bmap (array of __le32 words); @len bounds the valid
 * service IDs so out-of-range IDs evaluate to false.
 *
 * NOTE(review): the word/bit split divides by sizeof(u32) (== 4 bytes),
 * not by the 32-bit word width, so only four service bits are used per
 * word.  This looks like a bits-vs-bytes slip but presumably matches the
 * packing the firmware uses for this bitmap — confirm against the firmware
 * headers before changing it.
 */
#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
	((svc_id) < (len) && \
	 __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
	 BIT((svc_id)%(sizeof(u32))))
955
/*
 * Map one firmware TLV service id (x) to the driver-internal service
 * id (y): if bit x is set in the firmware bitmap "in" (len valid ids),
 * set bit y in the kernel bitmap "out".  Deliberately captures "in"
 * and "out" from the expansion site (see wmi_tlv_svc_map() below);
 * it is #undef'd right after its last use.
 *
 * Fix: parenthesize the macro arguments (x was already protected by
 * the inner macro, y was expanded bare into __set_bit()).
 */
#define SVCMAP(x, y, len) \
	do { \
		if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
			__set_bit((y), out); \
	} while (0)
961
/*
 * Translate the firmware's TLV service bitmap (@in, @len valid ids)
 * into the driver's generic service bitmap (@out): for every
 * WMI_TLV_SERVICE_* bit that is set, set the corresponding
 * WMI_SERVICE_* bit.  Bits for services the TLV firmware does not
 * advertise are left untouched.  Each SVCMAP() line is one
 * TLV-id -> generic-id pair; keep the pairs matched when editing.
 */
static inline void
wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
{
	SVCMAP(WMI_TLV_SERVICE_BEACON_OFFLOAD,
	       WMI_SERVICE_BEACON_OFFLOAD, len);
	SVCMAP(WMI_TLV_SERVICE_SCAN_OFFLOAD,
	       WMI_SERVICE_SCAN_OFFLOAD, len);
	SVCMAP(WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
	       WMI_SERVICE_ROAM_SCAN_OFFLOAD, len);
	SVCMAP(WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
	       WMI_SERVICE_BCN_MISS_OFFLOAD, len);
	SVCMAP(WMI_TLV_SERVICE_STA_PWRSAVE,
	       WMI_SERVICE_STA_PWRSAVE, len);
	SVCMAP(WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
	       WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
	SVCMAP(WMI_TLV_SERVICE_AP_UAPSD,
	       WMI_SERVICE_AP_UAPSD, len);
	SVCMAP(WMI_TLV_SERVICE_AP_DFS,
	       WMI_SERVICE_AP_DFS, len);
	SVCMAP(WMI_TLV_SERVICE_11AC,
	       WMI_SERVICE_11AC, len);
	SVCMAP(WMI_TLV_SERVICE_BLOCKACK,
	       WMI_SERVICE_BLOCKACK, len);
	SVCMAP(WMI_TLV_SERVICE_PHYERR,
	       WMI_SERVICE_PHYERR, len);
	SVCMAP(WMI_TLV_SERVICE_BCN_FILTER,
	       WMI_SERVICE_BCN_FILTER, len);
	SVCMAP(WMI_TLV_SERVICE_RTT,
	       WMI_SERVICE_RTT, len);
	SVCMAP(WMI_TLV_SERVICE_WOW,
	       WMI_SERVICE_WOW, len);
	SVCMAP(WMI_TLV_SERVICE_RATECTRL_CACHE,
	       WMI_SERVICE_RATECTRL_CACHE, len);
	SVCMAP(WMI_TLV_SERVICE_IRAM_TIDS,
	       WMI_SERVICE_IRAM_TIDS, len);
	SVCMAP(WMI_TLV_SERVICE_ARPNS_OFFLOAD,
	       WMI_SERVICE_ARPNS_OFFLOAD, len);
	SVCMAP(WMI_TLV_SERVICE_NLO,
	       WMI_SERVICE_NLO, len);
	SVCMAP(WMI_TLV_SERVICE_GTK_OFFLOAD,
	       WMI_SERVICE_GTK_OFFLOAD, len);
	SVCMAP(WMI_TLV_SERVICE_SCAN_SCH,
	       WMI_SERVICE_SCAN_SCH, len);
	SVCMAP(WMI_TLV_SERVICE_CSA_OFFLOAD,
	       WMI_SERVICE_CSA_OFFLOAD, len);
	SVCMAP(WMI_TLV_SERVICE_CHATTER,
	       WMI_SERVICE_CHATTER, len);
	SVCMAP(WMI_TLV_SERVICE_COEX_FREQAVOID,
	       WMI_SERVICE_COEX_FREQAVOID, len);
	SVCMAP(WMI_TLV_SERVICE_PACKET_POWER_SAVE,
	       WMI_SERVICE_PACKET_POWER_SAVE, len);
	SVCMAP(WMI_TLV_SERVICE_FORCE_FW_HANG,
	       WMI_SERVICE_FORCE_FW_HANG, len);
	SVCMAP(WMI_TLV_SERVICE_GPIO,
	       WMI_SERVICE_GPIO, len);
	SVCMAP(WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
	       WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
	SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
	       WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
	SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
	       WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
	SVCMAP(WMI_TLV_SERVICE_STA_KEEP_ALIVE,
	       WMI_SERVICE_STA_KEEP_ALIVE, len);
	SVCMAP(WMI_TLV_SERVICE_TX_ENCAP,
	       WMI_SERVICE_TX_ENCAP, len);
	SVCMAP(WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
	       WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
	SVCMAP(WMI_TLV_SERVICE_EARLY_RX,
	       WMI_SERVICE_EARLY_RX, len);
	SVCMAP(WMI_TLV_SERVICE_STA_SMPS,
	       WMI_SERVICE_STA_SMPS, len);
	SVCMAP(WMI_TLV_SERVICE_FWTEST,
	       WMI_SERVICE_FWTEST, len);
	SVCMAP(WMI_TLV_SERVICE_STA_WMMAC,
	       WMI_SERVICE_STA_WMMAC, len);
	SVCMAP(WMI_TLV_SERVICE_TDLS,
	       WMI_SERVICE_TDLS, len);
	SVCMAP(WMI_TLV_SERVICE_BURST,
	       WMI_SERVICE_BURST, len);
	SVCMAP(WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
	       WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE, len);
	SVCMAP(WMI_TLV_SERVICE_ADAPTIVE_OCS,
	       WMI_SERVICE_ADAPTIVE_OCS, len);
	SVCMAP(WMI_TLV_SERVICE_BA_SSN_SUPPORT,
	       WMI_SERVICE_BA_SSN_SUPPORT, len);
	SVCMAP(WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
	       WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE, len);
	SVCMAP(WMI_TLV_SERVICE_WLAN_HB,
	       WMI_SERVICE_WLAN_HB, len);
	SVCMAP(WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
	       WMI_SERVICE_LTE_ANT_SHARE_SUPPORT, len);
	SVCMAP(WMI_TLV_SERVICE_BATCH_SCAN,
	       WMI_SERVICE_BATCH_SCAN, len);
	SVCMAP(WMI_TLV_SERVICE_QPOWER,
	       WMI_SERVICE_QPOWER, len);
	SVCMAP(WMI_TLV_SERVICE_PLMREQ,
	       WMI_SERVICE_PLMREQ, len);
	SVCMAP(WMI_TLV_SERVICE_THERMAL_MGMT,
	       WMI_SERVICE_THERMAL_MGMT, len);
	SVCMAP(WMI_TLV_SERVICE_RMC,
	       WMI_SERVICE_RMC, len);
	SVCMAP(WMI_TLV_SERVICE_MHF_OFFLOAD,
	       WMI_SERVICE_MHF_OFFLOAD, len);
	SVCMAP(WMI_TLV_SERVICE_COEX_SAR,
	       WMI_SERVICE_COEX_SAR, len);
	SVCMAP(WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
	       WMI_SERVICE_BCN_TXRATE_OVERRIDE, len);
	SVCMAP(WMI_TLV_SERVICE_NAN,
	       WMI_SERVICE_NAN, len);
	SVCMAP(WMI_TLV_SERVICE_L1SS_STAT,
	       WMI_SERVICE_L1SS_STAT, len);
	SVCMAP(WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
	       WMI_SERVICE_ESTIMATE_LINKSPEED, len);
	SVCMAP(WMI_TLV_SERVICE_OBSS_SCAN,
	       WMI_SERVICE_OBSS_SCAN, len);
	SVCMAP(WMI_TLV_SERVICE_TDLS_OFFCHAN,
	       WMI_SERVICE_TDLS_OFFCHAN, len);
	SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
	       WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len);
	SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
	       WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len);
	SVCMAP(WMI_TLV_SERVICE_IBSS_PWRSAVE,
	       WMI_SERVICE_IBSS_PWRSAVE, len);
	SVCMAP(WMI_TLV_SERVICE_LPASS,
	       WMI_SERVICE_LPASS, len);
	SVCMAP(WMI_TLV_SERVICE_EXTSCAN,
	       WMI_SERVICE_EXTSCAN, len);
	SVCMAP(WMI_TLV_SERVICE_D0WOW,
	       WMI_SERVICE_D0WOW, len);
	SVCMAP(WMI_TLV_SERVICE_HSOFFLOAD,
	       WMI_SERVICE_HSOFFLOAD, len);
	SVCMAP(WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
	       WMI_SERVICE_ROAM_HO_OFFLOAD, len);
	SVCMAP(WMI_TLV_SERVICE_RX_FULL_REORDER,
	       WMI_SERVICE_RX_FULL_REORDER, len);
	SVCMAP(WMI_TLV_SERVICE_DHCP_OFFLOAD,
	       WMI_SERVICE_DHCP_OFFLOAD, len);
	SVCMAP(WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
	       WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, len);
	SVCMAP(WMI_TLV_SERVICE_MDNS_OFFLOAD,
	       WMI_SERVICE_MDNS_OFFLOAD, len);
	SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
	       WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
}
1106
1107#undef SVCMAP
1108
1109struct wmi_tlv {
1110 __le16 len;
1111 __le16 tag;
1112 u8 value[0];
1113} __packed;
1114
/* Number of per-chain RSSI readings in a TLV management rx event. */
#define WMI_TLV_MGMT_RX_NUM_RSSI 4

/*
 * Fixed part of the TLV management-frame rx event.  All fields are
 * little-endian on the wire.  @buf_len is the length in bytes of the
 * frame buffer that accompanies this event (the frame itself is
 * presumably carried in a following TLV -- confirm against wmi-tlv.c).
 */
struct wmi_tlv_mgmt_rx_ev {
	__le32 channel;
	__le32 snr;
	__le32 rate;
	__le32 phy_mode;
	__le32 buf_len;
	__le32 status;
	__le32 rssi[WMI_TLV_MGMT_RX_NUM_RSSI];
} __packed;
1126
/*
 * ABI version block exchanged during the init/ready handshake (used in
 * wmi_tlv_init_cmd, wmi_tlv_svc_rdy_ev and wmi_tlv_rdy_ev below).
 * abi_ver0/abi_ver1 are the primary version words; abi_ver_ns0..ns3
 * appear to be namespaced version words -- exact semantics are defined
 * by the firmware ABI, not visible here.
 */
struct wmi_tlv_abi_version {
	__le32 abi_ver0;
	__le32 abi_ver1;
	__le32 abi_ver_ns0;
	__le32 abi_ver_ns1;
	__le32 abi_ver_ns2;
	__le32 abi_ver_ns3;
} __packed;
1135
/* Hardware board id, reported in wmi_tlv_svc_rdy_ev.hw_bd_id. */
enum wmi_tlv_hw_bd_id {
	WMI_TLV_HW_BD_LEGACY = 0,
	WMI_TLV_HW_BD_QCA6174 = 1,
	WMI_TLV_HW_BD_QCA2582 = 2,
};
1141
/*
 * Per-board identification bytes reported in the service-ready event.
 * Only valid when wmi_tlv_svc_rdy_ev.hw_bd_id is non-zero.
 */
struct wmi_tlv_hw_bd_info {
	u8 rev;
	u8 project_id;
	u8 custom_id;
	u8 reference_design_id;
} __packed;
1148
/*
 * Fixed part of the TLV service-ready event: the firmware's hardware
 * capabilities and resource limits, reported once after boot.  All
 * fields are little-endian on the wire.
 */
struct wmi_tlv_svc_rdy_ev {
	__le32 fw_build_vers;
	struct wmi_tlv_abi_version abi;
	__le32 phy_capability;
	__le32 max_frag_entry;
	__le32 num_rf_chains;
	__le32 ht_cap_info;
	__le32 vht_cap_info;
	__le32 vht_supp_mcs;
	__le32 hw_min_tx_power;
	__le32 hw_max_tx_power;
	__le32 sys_cap_info;
	__le32 min_pkt_size_enable;
	__le32 max_bcn_ie_size;
	__le32 num_mem_reqs;	/* host memory requests that follow */
	__le32 max_num_scan_chans;
	__le32 hw_bd_id; /* 0 means hw_bd_info is invalid */
	struct wmi_tlv_hw_bd_info hw_bd_info[5];
} __packed;
1168
/*
 * TLV ready event: sent by firmware when initialization completes,
 * carrying its ABI version, the device MAC address and a status code.
 */
struct wmi_tlv_rdy_ev {
	struct wmi_tlv_abi_version abi;
	struct wmi_mac_addr mac_addr;
	__le32 status;
} __packed;
1174
/*
 * Resource configuration handed to firmware with the init command.
 * Dimensions firmware-internal tables (peers, vdevs, TIDs, offload
 * entries, ...).  Field names follow the firmware interface; all
 * values are little-endian on the wire.  Field order is ABI -- do not
 * reorder.
 */
struct wmi_tlv_resource_config {
	__le32 num_vdevs;
	__le32 num_peers;
	__le32 num_offload_peers;
	__le32 num_offload_reorder_bufs;
	__le32 num_peer_keys;
	__le32 num_tids;
	__le32 ast_skid_limit;
	__le32 tx_chain_mask;
	__le32 rx_chain_mask;
	__le32 rx_timeout_pri[4];	/* presumably one per AC -- confirm */
	__le32 rx_decap_mode;
	__le32 scan_max_pending_reqs;
	__le32 bmiss_offload_max_vdev;
	__le32 roam_offload_max_vdev;
	__le32 roam_offload_max_ap_profiles;
	__le32 num_mcast_groups;
	__le32 num_mcast_table_elems;
	__le32 mcast2ucast_mode;
	__le32 tx_dbg_log_size;
	__le32 num_wds_entries;
	__le32 dma_burst_size;
	__le32 mac_aggr_delim;
	__le32 rx_skip_defrag_timeout_dup_detection_check;
	__le32 vow_config;
	__le32 gtk_offload_max_vdev;
	__le32 num_msdu_desc;
	__le32 max_frag_entries;
	__le32 num_tdls_vdevs;
	__le32 num_tdls_conn_table_entries;
	__le32 beacon_tx_offload_max_vdev;
	__le32 num_multicast_filter_entries;
	__le32 num_wow_filters;
	__le32 num_keep_alive_pattern;
	__le32 keep_alive_pattern_size;
	__le32 max_tdls_concurrent_sleep_sta;
	__le32 max_tdls_concurrent_buffer_sta;
} __packed;
1213
/*
 * Fixed part of the TLV init command.  @num_host_mem_chunks is the
 * count of host memory chunk descriptors that accompany the command.
 */
struct wmi_tlv_init_cmd {
	struct wmi_tlv_abi_version abi;
	__le32 num_host_mem_chunks;
} __packed;
1218
/* Set one pdev-level parameter (@param_id) to @param_value. */
struct wmi_tlv_pdev_set_param_cmd {
	__le32 pdev_id; /* not used yet */
	__le32 param_id;
	__le32 param_value;
} __packed;
1224
/*
 * Set the regulatory domain: a combined code plus separate 2 GHz /
 * 5 GHz domains and conformance limits.
 */
struct wmi_tlv_pdev_set_rd_cmd {
	__le32 pdev_id; /* not used yet */
	__le32 regd;
	__le32 regd_2ghz;
	__le32 regd_5ghz;
	__le32 conform_limit_2ghz;
	__le32 conform_limit_5ghz;
} __packed;
1233
/*
 * Fixed part of the scan channel list command; @num_scan_chans channel
 * entries accompany it.
 */
struct wmi_tlv_scan_chan_list_cmd {
	__le32 num_scan_chans;
} __packed;
1237
/*
 * TLV variant of the start-scan command: the common fixed part plus
 * counts/lengths of the variable-size lists (channels, bssids, ssids,
 * extra IEs) that accompany the command.
 */
struct wmi_tlv_start_scan_cmd {
	struct wmi_start_scan_common common;
	__le32 burst_duration_ms;
	__le32 num_channels;
	__le32 num_bssids;
	__le32 num_ssids;
	__le32 ie_len;	/* length in bytes of the appended IE blob */
	__le32 num_probes;
} __packed;
1247
/*
 * Start (bring up) a vdev on a channel.  @bcn_intval and @dtim_period
 * configure beaconing; @num_noa_descr is the count of P2P NoA
 * descriptors accompanying the command.
 */
struct wmi_tlv_vdev_start_cmd {
	__le32 vdev_id;
	__le32 requestor_id;
	__le32 bcn_intval;
	__le32 dtim_period;
	__le32 flags;
	struct wmi_ssid ssid;
	__le32 bcn_tx_rate;
	__le32 bcn_tx_power;
	__le32 num_noa_descr;
	__le32 disable_hw_ack;
} __packed;
1260
/* Values for wmi_tlv_peer_create_cmd.peer_type. */
enum {
	WMI_TLV_PEER_TYPE_DEFAULT = 0, /* generic / non-BSS / self-peer */
	WMI_TLV_PEER_TYPE_BSS = 1,
	WMI_TLV_PEER_TYPE_TDLS = 2,
	WMI_TLV_PEER_TYPE_HOST_MAX = 127,	/* last host-assignable type */
	WMI_TLV_PEER_TYPE_ROAMOFFLOAD_TMP = 128,
};
1268
/* Create a peer entry on @vdev_id; @peer_type is WMI_TLV_PEER_TYPE_*. */
struct wmi_tlv_peer_create_cmd {
	__le32 vdev_id;
	struct wmi_mac_addr peer_addr;
	__le32 peer_type;
} __packed;
1274
/*
 * Fixed part of the peer association command: station capabilities
 * negotiated at assoc time.  @num_legacy_rates/@num_ht_rates count
 * rate entries that accompany the command.
 */
struct wmi_tlv_peer_assoc_cmd {
	struct wmi_mac_addr mac_addr;
	__le32 vdev_id;
	__le32 new_assoc;	/* non-zero presumably means fresh assoc vs reassoc -- confirm */
	__le32 assoc_id;
	__le32 flags;
	__le32 caps;
	__le32 listen_intval;
	__le32 ht_caps;
	__le32 max_mpdu;
	__le32 mpdu_density;
	__le32 rate_caps;
	__le32 nss;
	__le32 vht_caps;
	__le32 phy_mode;
	__le32 ht_info[2];
	__le32 num_legacy_rates;
	__le32 num_ht_rates;
} __packed;
1294
/* Suspend the pdev; @opt selects the suspend option/mode. */
struct wmi_tlv_pdev_suspend {
	__le32 pdev_id; /* not used yet */
	__le32 opt;
} __packed;
1299
/* Fixed part of the pdev WMM parameters command. */
struct wmi_tlv_pdev_set_wmm_cmd {
	__le32 pdev_id; /* not used yet */
	__le32 dg_type; /* meaning unknown (original author: "no idea..") */
} __packed;
1304
/* Fixed part of the per-vdev WMM parameters command. */
struct wmi_tlv_vdev_set_wmm_cmd {
	__le32 vdev_id;
} __packed;
1308
/*
 * Fixed part of a PHY error event.  tsf_l32/tsf_u32 look like the
 * lower/upper 32 bits of the TSF timestamp (per the l32/u32 naming);
 * @buf_len is the byte length of the accompanying error buffer.
 */
struct wmi_tlv_phyerr_ev {
	__le32 num_phyerrs;
	__le32 tsf_l32;
	__le32 tsf_u32;
	__le32 buf_len;
} __packed;
1315
/* Parameter ids for wmi_tlv_dbglog_cmd.param. */
enum wmi_tlv_dbglog_param {
	WMI_TLV_DBGLOG_PARAM_LOG_LEVEL = 1,
	WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE,
	WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE,
	WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE_BITMAP,
	WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE_BITMAP,
};
1323
/* Debug-log verbosity levels, most verbose (0) to errors only. */
enum wmi_tlv_dbglog_log_level {
	WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE = 0,
	WMI_TLV_DBGLOG_LOG_LEVEL_INFO,
	WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_1,
	WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_2,
	WMI_TLV_DBGLOG_LOG_LEVEL_WARN,
	WMI_TLV_DBGLOG_LOG_LEVEL_ERR,
};
1332
/* Maximum number of debug-log module ids selectable via the bitmap. */
#define WMI_TLV_DBGLOG_BITMAP_MAX_IDS 512
/*
 * NOTE(review): this divides the id count by sizeof(__le32) (== 4)
 * rather than by the 32 bits a word can hold.  It matches the original
 * code, so callers sizing buffers with it depend on that value --
 * confirm against the users in wmi-tlv.c before changing.
 */
#define WMI_TLV_DBGLOG_BITMAP_MAX_WORDS (WMI_TLV_DBGLOG_BITMAP_MAX_IDS / \
					 sizeof(__le32))
/* Wildcard module id: apply the log level to all firmware modules. */
#define WMI_TLV_DBGLOG_ALL_MODULES 0xffff
/*
 * Pack a module id (upper 16 bits) and log level (lower 8 bits) into
 * the single value used with WMI_TLV_DBGLOG_PARAM_LOG_LEVEL.
 *
 * Fix: parenthesize the macro arguments so expression arguments expand
 * safely (they were previously substituted bare into the shifts).
 */
#define WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(module_id, log_level) \
	((((module_id) << 16) & 0xffff0000) | \
	 (((log_level) << 0) & 0x000000ff))
1340
/*
 * Debug-log configuration command: @param is a wmi_tlv_dbglog_param id
 * and @value's meaning depends on the chosen parameter.
 */
struct wmi_tlv_dbglog_cmd {
	__le32 param;
	__le32 value;
} __packed;
1345
/* Resume command; carries no data beyond a reserved word. */
struct wmi_tlv_resume_cmd {
	__le32 reserved;
} __packed;
1349
/* Request firmware statistics of the given kind for a vdev/peer. */
struct wmi_tlv_req_stats_cmd {
	__le32 stats_id; /* wmi_stats_id */
	__le32 vdev_id;
	struct wmi_mac_addr peer_macaddr;
} __packed;
1355
/* Per-vdev statistics block as reported by TLV firmware. */
struct wmi_tlv_vdev_stats {
	__le32 vdev_id;
	__le32 beacon_snr;
	__le32 data_snr;
	__le32 num_tx_frames[4]; /* per-AC */
	__le32 num_rx_frames;
	__le32 num_tx_frames_retries[4];	/* per-AC */
	__le32 num_tx_frames_failures[4];	/* per-AC */
	__le32 num_rts_fail;
	__le32 num_rts_success;
	__le32 num_rx_err;
	__le32 num_rx_discard;
	__le32 num_tx_not_acked;
	__le32 tx_rate_history[10];
	__le32 beacon_rssi_history[10];
} __packed;
1372
/*
 * Enable packet logging; @filter selects which events are logged
 * (bit semantics defined by the firmware interface, not visible here).
 */
struct wmi_tlv_pktlog_enable {
	__le32 reserved;
	__le32 filter;
} __packed;
1377
/* Disable packet logging; no payload beyond a reserved word. */
struct wmi_tlv_pktlog_disable {
	__le32 reserved;
} __packed;
1381
/* Beacon transmission result, reported in wmi_tlv_bcn_tx_status_ev. */
enum wmi_tlv_bcn_tx_status {
	WMI_TLV_BCN_TX_STATUS_OK,
	WMI_TLV_BCN_TX_STATUS_XRETRY,	/* dropped after excessive retries */
	WMI_TLV_BCN_TX_STATUS_DROP,
	WMI_TLV_BCN_TX_STATUS_FILTERED,
};
1388
/* Beacon tx completion event; @tx_status is wmi_tlv_bcn_tx_status. */
struct wmi_tlv_bcn_tx_status_ev {
	__le32 vdev_id;
	__le32 tx_status;
} __packed;
1393
1394struct wmi_tlv_bcn_prb_info {
1395 __le32 caps;
1396 __le32 erp;
1397 u8 ies[0];
1398} __packed;
1399
/*
 * Install a beacon template for @vdev_id.  @tim_ie_offset is the byte
 * offset of the TIM IE within the template so firmware can update it;
 * @buf_len is the template length in bytes.
 */
struct wmi_tlv_bcn_tmpl_cmd {
	__le32 vdev_id;
	__le32 tim_ie_offset;
	__le32 buf_len;
} __packed;
1405
/*
 * Install a probe-response template for @vdev_id; @buf_len is the
 * template length in bytes.
 */
struct wmi_tlv_prb_tmpl_cmd {
	__le32 vdev_id;
	__le32 buf_len;
} __packed;
1410
/*
 * Set the P2P GO beacon IE for @vdev_id; @ie_len is the length in
 * bytes of the IE blob that accompanies the command.
 */
struct wmi_tlv_p2p_go_bcn_ie {
	__le32 vdev_id;
	__le32 ie_len;
} __packed;
1415
/* Values for wmi_tlv_diag_item.type. */
enum wmi_tlv_diag_item_type {
	WMI_TLV_DIAG_ITEM_TYPE_FW_EVENT,
	WMI_TLV_DIAG_ITEM_TYPE_FW_LOG,
	WMI_TLV_DIAG_ITEM_TYPE_FW_DEBUG_MSG,
};
1421
1422struct wmi_tlv_diag_item {
1423 u8 type;
1424 u8 reserved;
1425 __le16 len;
1426 __le32 timestamp;
1427 __le32 code;
1428 u8 payload[0];
1429} __packed;
1430
/*
 * Header of a diagnostic-data event; @num_items wmi_tlv_diag_item
 * records accompany it.
 */
struct wmi_tlv_diag_data_ev {
	__le32 num_items;
} __packed;
1434
/* Configure the station keepalive mechanism for a vdev. */
struct wmi_tlv_sta_keepalive_cmd {
	__le32 vdev_id;
	__le32 enabled;	/* non-zero enables keepalive */
	__le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
	__le32 interval; /* in seconds */
} __packed;
1441
/* Bind the TLV WMI implementation to @ar (defined outside this header). */
void ath10k_wmi_tlv_attach(struct ath10k *ar);
1443
1444#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index c0f3e4d09263..aeea1c793943 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -22,8 +22,10 @@
22#include "htc.h" 22#include "htc.h"
23#include "debug.h" 23#include "debug.h"
24#include "wmi.h" 24#include "wmi.h"
25#include "wmi-tlv.h"
25#include "mac.h" 26#include "mac.h"
26#include "testmode.h" 27#include "testmode.h"
28#include "wmi-ops.h"
27 29
28/* MAIN WMI cmd track */ 30/* MAIN WMI cmd track */
29static struct wmi_cmd_map wmi_cmd_map = { 31static struct wmi_cmd_map wmi_cmd_map = {
@@ -143,6 +145,7 @@ static struct wmi_cmd_map wmi_cmd_map = {
143 .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID, 145 .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
144 .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID, 146 .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
145 .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID, 147 .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
148 .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
146}; 149};
147 150
148/* 10.X WMI cmd track */ 151/* 10.X WMI cmd track */
@@ -265,6 +268,129 @@ static struct wmi_cmd_map wmi_10x_cmd_map = {
265 .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED, 268 .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
266 .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID, 269 .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
267 .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID, 270 .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
271 .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
272};
273
274/* 10.2.4 WMI cmd track */
275static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
276 .init_cmdid = WMI_10_2_INIT_CMDID,
277 .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
278 .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
279 .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
280 .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
281 .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
282 .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
283 .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
284 .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
285 .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
286 .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
287 .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
288 .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
289 .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
290 .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
291 .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
292 .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
293 .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
294 .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
295 .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
296 .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
297 .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
298 .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
299 .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
300 .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
301 .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
302 .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
303 .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
304 .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
305 .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
306 .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
307 .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
308 .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
309 .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
310 .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
311 .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
312 .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
313 .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
314 .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
315 .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
316 .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
317 .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
318 .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
319 .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
320 .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
321 .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
322 .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
323 .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
324 .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
325 .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
326 .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
327 .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
328 .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
329 .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
330 .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
331 .roam_scan_rssi_change_threshold =
332 WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
333 .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
334 .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
335 .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
336 .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
337 .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
338 .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
339 .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
340 .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
341 .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
342 .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
343 .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
344 .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
345 .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
346 .wlan_profile_set_hist_intvl_cmdid =
347 WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
348 .wlan_profile_get_profile_data_cmdid =
349 WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
350 .wlan_profile_enable_profile_id_cmdid =
351 WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
352 .wlan_profile_list_profile_id_cmdid =
353 WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
354 .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
355 .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
356 .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
357 .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
358 .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
359 .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
360 .wow_enable_disable_wake_event_cmdid =
361 WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
362 .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
363 .wow_hostwakeup_from_sleep_cmdid =
364 WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
365 .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
366 .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
367 .vdev_spectral_scan_configure_cmdid =
368 WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
369 .vdev_spectral_scan_enable_cmdid =
370 WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
371 .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
372 .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
373 .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
374 .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
375 .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
376 .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
377 .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
378 .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
379 .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
380 .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
381 .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
382 .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
383 .echo_cmdid = WMI_10_2_ECHO_CMDID,
384 .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
385 .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
386 .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
387 .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
388 .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
389 .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
390 .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
391 .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
392 .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
393 .pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
268}; 394};
269 395
270/* MAIN WMI VDEV param map */ 396/* MAIN WMI VDEV param map */
@@ -385,6 +511,64 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
385 WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, 511 WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
386}; 512};
387 513
514static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
515 .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
516 .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
517 .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
518 .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
519 .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
520 .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
521 .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
522 .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
523 .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
524 .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
525 .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
526 .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
527 .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
528 .wmi_vdev_oc_scheduler_air_time_limit =
529 WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
530 .wds = WMI_10X_VDEV_PARAM_WDS,
531 .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
532 .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
533 .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
534 .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
535 .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
536 .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
537 .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
538 .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
539 .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
540 .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
541 .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
542 .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
543 .sgi = WMI_10X_VDEV_PARAM_SGI,
544 .ldpc = WMI_10X_VDEV_PARAM_LDPC,
545 .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
546 .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
547 .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
548 .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
549 .nss = WMI_10X_VDEV_PARAM_NSS,
550 .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
551 .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
552 .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
553 .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
554 .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
555 .ap_keepalive_min_idle_inactive_time_secs =
556 WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
557 .ap_keepalive_max_idle_inactive_time_secs =
558 WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
559 .ap_keepalive_max_unresponsive_time_secs =
560 WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
561 .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
562 .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
563 .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
564 .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
565 .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
566 .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
567 .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
568 .ap_detect_out_of_sync_sleeping_sta_time_secs =
569 WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
570};
571
388static struct wmi_pdev_param_map wmi_pdev_param_map = { 572static struct wmi_pdev_param_map wmi_pdev_param_map = {
389 .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK, 573 .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
390 .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK, 574 .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
@@ -434,6 +618,7 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = {
434 .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED, 618 .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
435 .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED, 619 .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
436 .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED, 620 .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
621 .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
437}; 622};
438 623
439static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { 624static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
@@ -486,6 +671,60 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
486 .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET, 671 .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
487 .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR, 672 .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
488 .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE, 673 .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
674 .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
675};
676
677static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
678 .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
679 .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
680 .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
681 .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
682 .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
683 .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
684 .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
685 .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
686 .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
687 .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
688 .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
689 .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
690 .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
691 .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
692 .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
693 .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
694 .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
695 .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
696 .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
697 .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
698 .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
699 .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
700 .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
701 .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
702 .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
703 .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
704 .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
705 .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
706 .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
707 .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
708 .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
709 .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
710 .bcnflt_stats_update_period =
711 WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
712 .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
713 .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
714 .dcs = WMI_10X_PDEV_PARAM_DCS,
715 .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
716 .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
717 .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
718 .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
719 .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
720 .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
721 .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
722 .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
723 .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
724 .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
725 .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
726 .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
727 .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
489}; 728};
490 729
491/* firmware 10.2 specific mappings */ 730/* firmware 10.2 specific mappings */
@@ -607,11 +846,11 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = {
607 .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED, 846 .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
608 .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID, 847 .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
609 .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID, 848 .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
849 .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
610}; 850};
611 851
612static void 852void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
613ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, 853 const struct wmi_channel_arg *arg)
614 const struct wmi_channel_arg *arg)
615{ 854{
616 u32 flags = 0; 855 u32 flags = 0;
617 856
@@ -685,8 +924,8 @@ static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
685 dev_kfree_skb(skb); 924 dev_kfree_skb(skb);
686} 925}
687 926
688static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb, 927int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
689 u32 cmd_id) 928 u32 cmd_id)
690{ 929{
691 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); 930 struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
692 struct wmi_cmd_hdr *cmd_hdr; 931 struct wmi_cmd_hdr *cmd_hdr;
@@ -717,23 +956,45 @@ err_pull:
717 956
718static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif) 957static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
719{ 958{
959 struct ath10k *ar = arvif->ar;
960 struct ath10k_skb_cb *cb;
961 struct sk_buff *bcn;
720 int ret; 962 int ret;
721 963
722 lockdep_assert_held(&arvif->ar->data_lock); 964 spin_lock_bh(&ar->data_lock);
723 965
724 if (arvif->beacon == NULL) 966 bcn = arvif->beacon;
725 return;
726 967
727 if (arvif->beacon_sent) 968 if (!bcn)
728 return; 969 goto unlock;
729 970
730 ret = ath10k_wmi_beacon_send_ref_nowait(arvif); 971 cb = ATH10K_SKB_CB(bcn);
731 if (ret) 972
732 return; 973 switch (arvif->beacon_state) {
974 case ATH10K_BEACON_SENDING:
975 case ATH10K_BEACON_SENT:
976 break;
977 case ATH10K_BEACON_SCHEDULED:
978 arvif->beacon_state = ATH10K_BEACON_SENDING;
979 spin_unlock_bh(&ar->data_lock);
980
981 ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
982 arvif->vdev_id,
983 bcn->data, bcn->len,
984 cb->paddr,
985 cb->bcn.dtim_zero,
986 cb->bcn.deliver_cab);
987
988 spin_lock_bh(&ar->data_lock);
989
990 if (ret == 0)
991 arvif->beacon_state = ATH10K_BEACON_SENT;
992 else
993 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
994 }
733 995
734 /* We need to retain the arvif->beacon reference for DMA unmapping and 996unlock:
735 * freeing the skbuff later. */ 997 spin_unlock_bh(&ar->data_lock);
736 arvif->beacon_sent = true;
737} 998}
738 999
739static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac, 1000static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
@@ -746,12 +1007,10 @@ static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
746 1007
747static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar) 1008static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
748{ 1009{
749 spin_lock_bh(&ar->data_lock);
750 ieee80211_iterate_active_interfaces_atomic(ar->hw, 1010 ieee80211_iterate_active_interfaces_atomic(ar->hw,
751 IEEE80211_IFACE_ITER_NORMAL, 1011 IEEE80211_IFACE_ITER_NORMAL,
752 ath10k_wmi_tx_beacons_iter, 1012 ath10k_wmi_tx_beacons_iter,
753 NULL); 1013 NULL);
754 spin_unlock_bh(&ar->data_lock);
755} 1014}
756 1015
757static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar) 1016static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
@@ -792,24 +1051,23 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
792 return ret; 1051 return ret;
793} 1052}
794 1053
795int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb) 1054static struct sk_buff *
1055ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
796{ 1056{
797 int ret = 0;
798 struct wmi_mgmt_tx_cmd *cmd; 1057 struct wmi_mgmt_tx_cmd *cmd;
799 struct ieee80211_hdr *hdr; 1058 struct ieee80211_hdr *hdr;
800 struct sk_buff *wmi_skb; 1059 struct sk_buff *skb;
801 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
802 int len; 1060 int len;
803 u32 buf_len = skb->len; 1061 u32 buf_len = msdu->len;
804 u16 fc; 1062 u16 fc;
805 1063
806 hdr = (struct ieee80211_hdr *)skb->data; 1064 hdr = (struct ieee80211_hdr *)msdu->data;
807 fc = le16_to_cpu(hdr->frame_control); 1065 fc = le16_to_cpu(hdr->frame_control);
808 1066
809 if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control))) 1067 if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
810 return -EINVAL; 1068 return ERR_PTR(-EINVAL);
811 1069
812 len = sizeof(cmd->hdr) + skb->len; 1070 len = sizeof(cmd->hdr) + msdu->len;
813 1071
814 if ((ieee80211_is_action(hdr->frame_control) || 1072 if ((ieee80211_is_action(hdr->frame_control) ||
815 ieee80211_is_deauth(hdr->frame_control) || 1073 ieee80211_is_deauth(hdr->frame_control) ||
@@ -821,36 +1079,27 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
821 1079
822 len = round_up(len, 4); 1080 len = round_up(len, 4);
823 1081
824 wmi_skb = ath10k_wmi_alloc_skb(ar, len); 1082 skb = ath10k_wmi_alloc_skb(ar, len);
825 if (!wmi_skb) 1083 if (!skb)
826 return -ENOMEM; 1084 return ERR_PTR(-ENOMEM);
827 1085
828 cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data; 1086 cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
829 1087
830 cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id); 1088 cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(msdu)->vdev_id);
831 cmd->hdr.tx_rate = 0; 1089 cmd->hdr.tx_rate = 0;
832 cmd->hdr.tx_power = 0; 1090 cmd->hdr.tx_power = 0;
833 cmd->hdr.buf_len = __cpu_to_le32(buf_len); 1091 cmd->hdr.buf_len = __cpu_to_le32(buf_len);
834 1092
835 ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr)); 1093 ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
836 memcpy(cmd->buf, skb->data, skb->len); 1094 memcpy(cmd->buf, msdu->data, msdu->len);
837 1095
838 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n", 1096 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
839 wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE, 1097 msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
840 fc & IEEE80211_FCTL_STYPE); 1098 fc & IEEE80211_FCTL_STYPE);
841 trace_ath10k_tx_hdr(ar, skb->data, skb->len); 1099 trace_ath10k_tx_hdr(ar, skb->data, skb->len);
842 trace_ath10k_tx_payload(ar, skb->data, skb->len); 1100 trace_ath10k_tx_payload(ar, skb->data, skb->len);
843 1101
844 /* Send the management frame buffer to the target */ 1102 return skb;
845 ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
846 if (ret)
847 return ret;
848
849 /* TODO: report tx status to mac80211 - temporary just ACK */
850 info->flags |= IEEE80211_TX_STAT_ACK;
851 ieee80211_tx_status_irqsafe(ar->hw, skb);
852
853 return ret;
854} 1103}
855 1104
856static void ath10k_wmi_event_scan_started(struct ath10k *ar) 1105static void ath10k_wmi_event_scan_started(struct ath10k *ar)
@@ -977,22 +1226,48 @@ ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
977 } 1226 }
978} 1227}
979 1228
980static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) 1229static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
1230 struct wmi_scan_ev_arg *arg)
981{ 1231{
982 struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data; 1232 struct wmi_scan_event *ev = (void *)skb->data;
1233
1234 if (skb->len < sizeof(*ev))
1235 return -EPROTO;
1236
1237 skb_pull(skb, sizeof(*ev));
1238 arg->event_type = ev->event_type;
1239 arg->reason = ev->reason;
1240 arg->channel_freq = ev->channel_freq;
1241 arg->scan_req_id = ev->scan_req_id;
1242 arg->scan_id = ev->scan_id;
1243 arg->vdev_id = ev->vdev_id;
1244
1245 return 0;
1246}
1247
1248int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
1249{
1250 struct wmi_scan_ev_arg arg = {};
983 enum wmi_scan_event_type event_type; 1251 enum wmi_scan_event_type event_type;
984 enum wmi_scan_completion_reason reason; 1252 enum wmi_scan_completion_reason reason;
985 u32 freq; 1253 u32 freq;
986 u32 req_id; 1254 u32 req_id;
987 u32 scan_id; 1255 u32 scan_id;
988 u32 vdev_id; 1256 u32 vdev_id;
1257 int ret;
1258
1259 ret = ath10k_wmi_pull_scan(ar, skb, &arg);
1260 if (ret) {
1261 ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
1262 return ret;
1263 }
989 1264
990 event_type = __le32_to_cpu(event->event_type); 1265 event_type = __le32_to_cpu(arg.event_type);
991 reason = __le32_to_cpu(event->reason); 1266 reason = __le32_to_cpu(arg.reason);
992 freq = __le32_to_cpu(event->channel_freq); 1267 freq = __le32_to_cpu(arg.channel_freq);
993 req_id = __le32_to_cpu(event->scan_req_id); 1268 req_id = __le32_to_cpu(arg.scan_req_id);
994 scan_id = __le32_to_cpu(event->scan_id); 1269 scan_id = __le32_to_cpu(arg.scan_id);
995 vdev_id = __le32_to_cpu(event->vdev_id); 1270 vdev_id = __le32_to_cpu(arg.vdev_id);
996 1271
997 spin_lock_bh(&ar->data_lock); 1272 spin_lock_bh(&ar->data_lock);
998 1273
@@ -1147,11 +1422,51 @@ static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
1147 } 1422 }
1148} 1423}
1149 1424
1150static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) 1425static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
1426 struct wmi_mgmt_rx_ev_arg *arg)
1151{ 1427{
1152 struct wmi_mgmt_rx_event_v1 *ev_v1; 1428 struct wmi_mgmt_rx_event_v1 *ev_v1;
1153 struct wmi_mgmt_rx_event_v2 *ev_v2; 1429 struct wmi_mgmt_rx_event_v2 *ev_v2;
1154 struct wmi_mgmt_rx_hdr_v1 *ev_hdr; 1430 struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
1431 size_t pull_len;
1432 u32 msdu_len;
1433
1434 if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
1435 ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
1436 ev_hdr = &ev_v2->hdr.v1;
1437 pull_len = sizeof(*ev_v2);
1438 } else {
1439 ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
1440 ev_hdr = &ev_v1->hdr;
1441 pull_len = sizeof(*ev_v1);
1442 }
1443
1444 if (skb->len < pull_len)
1445 return -EPROTO;
1446
1447 skb_pull(skb, pull_len);
1448 arg->channel = ev_hdr->channel;
1449 arg->buf_len = ev_hdr->buf_len;
1450 arg->status = ev_hdr->status;
1451 arg->snr = ev_hdr->snr;
1452 arg->phy_mode = ev_hdr->phy_mode;
1453 arg->rate = ev_hdr->rate;
1454
1455 msdu_len = __le32_to_cpu(arg->buf_len);
1456 if (skb->len < msdu_len)
1457 return -EPROTO;
1458
1459 /* the WMI buffer might've ended up being padded to 4 bytes due to HTC
1460 * trailer with credit update. Trim the excess garbage.
1461 */
1462 skb_trim(skb, msdu_len);
1463
1464 return 0;
1465}
1466
1467int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
1468{
1469 struct wmi_mgmt_rx_ev_arg arg = {};
1155 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1470 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1156 struct ieee80211_hdr *hdr; 1471 struct ieee80211_hdr *hdr;
1157 u32 rx_status; 1472 u32 rx_status;
@@ -1161,24 +1476,20 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
1161 u32 rate; 1476 u32 rate;
1162 u32 buf_len; 1477 u32 buf_len;
1163 u16 fc; 1478 u16 fc;
1164 int pull_len; 1479 int ret;
1165 1480
1166 if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) { 1481 ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
1167 ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data; 1482 if (ret) {
1168 ev_hdr = &ev_v2->hdr.v1; 1483 ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
1169 pull_len = sizeof(*ev_v2); 1484 return ret;
1170 } else {
1171 ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
1172 ev_hdr = &ev_v1->hdr;
1173 pull_len = sizeof(*ev_v1);
1174 } 1485 }
1175 1486
1176 channel = __le32_to_cpu(ev_hdr->channel); 1487 channel = __le32_to_cpu(arg.channel);
1177 buf_len = __le32_to_cpu(ev_hdr->buf_len); 1488 buf_len = __le32_to_cpu(arg.buf_len);
1178 rx_status = __le32_to_cpu(ev_hdr->status); 1489 rx_status = __le32_to_cpu(arg.status);
1179 snr = __le32_to_cpu(ev_hdr->snr); 1490 snr = __le32_to_cpu(arg.snr);
1180 phy_mode = __le32_to_cpu(ev_hdr->phy_mode); 1491 phy_mode = __le32_to_cpu(arg.phy_mode);
1181 rate = __le32_to_cpu(ev_hdr->rate); 1492 rate = __le32_to_cpu(arg.rate);
1182 1493
1183 memset(status, 0, sizeof(*status)); 1494 memset(status, 0, sizeof(*status));
1184 1495
@@ -1232,8 +1543,6 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
1232 status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR; 1543 status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
1233 status->rate_idx = get_rate_idx(rate, status->band); 1544 status->rate_idx = get_rate_idx(rate, status->band);
1234 1545
1235 skb_pull(skb, pull_len);
1236
1237 hdr = (struct ieee80211_hdr *)skb->data; 1546 hdr = (struct ieee80211_hdr *)skb->data;
1238 fc = le16_to_cpu(hdr->frame_control); 1547 fc = le16_to_cpu(hdr->frame_control);
1239 1548
@@ -1266,12 +1575,6 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
1266 status->freq, status->band, status->signal, 1575 status->freq, status->band, status->signal,
1267 status->rate_idx); 1576 status->rate_idx);
1268 1577
1269 /*
1270 * packets from HTC come aligned to 4byte boundaries
1271 * because they can originally come in along with a trailer
1272 */
1273 skb_trim(skb, buf_len);
1274
1275 ieee80211_rx(ar->hw, skb); 1578 ieee80211_rx(ar->hw, skb);
1276 return 0; 1579 return 0;
1277} 1580}
@@ -1295,21 +1598,44 @@ exit:
1295 return idx; 1598 return idx;
1296} 1599}
1297 1600
1298static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) 1601static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
1602 struct wmi_ch_info_ev_arg *arg)
1299{ 1603{
1300 struct wmi_chan_info_event *ev; 1604 struct wmi_chan_info_event *ev = (void *)skb->data;
1605
1606 if (skb->len < sizeof(*ev))
1607 return -EPROTO;
1608
1609 skb_pull(skb, sizeof(*ev));
1610 arg->err_code = ev->err_code;
1611 arg->freq = ev->freq;
1612 arg->cmd_flags = ev->cmd_flags;
1613 arg->noise_floor = ev->noise_floor;
1614 arg->rx_clear_count = ev->rx_clear_count;
1615 arg->cycle_count = ev->cycle_count;
1616
1617 return 0;
1618}
1619
1620void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
1621{
1622 struct wmi_ch_info_ev_arg arg = {};
1301 struct survey_info *survey; 1623 struct survey_info *survey;
1302 u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count; 1624 u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
1303 int idx; 1625 int idx, ret;
1304 1626
1305 ev = (struct wmi_chan_info_event *)skb->data; 1627 ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
1628 if (ret) {
1629 ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
1630 return;
1631 }
1306 1632
1307 err_code = __le32_to_cpu(ev->err_code); 1633 err_code = __le32_to_cpu(arg.err_code);
1308 freq = __le32_to_cpu(ev->freq); 1634 freq = __le32_to_cpu(arg.freq);
1309 cmd_flags = __le32_to_cpu(ev->cmd_flags); 1635 cmd_flags = __le32_to_cpu(arg.cmd_flags);
1310 noise_floor = __le32_to_cpu(ev->noise_floor); 1636 noise_floor = __le32_to_cpu(arg.noise_floor);
1311 rx_clear_count = __le32_to_cpu(ev->rx_clear_count); 1637 rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
1312 cycle_count = __le32_to_cpu(ev->cycle_count); 1638 cycle_count = __le32_to_cpu(arg.cycle_count);
1313 1639
1314 ath10k_dbg(ar, ATH10K_DBG_WMI, 1640 ath10k_dbg(ar, ATH10K_DBG_WMI,
1315 "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n", 1641 "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
@@ -1344,11 +1670,11 @@ static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
1344 rx_clear_count -= ar->survey_last_rx_clear_count; 1670 rx_clear_count -= ar->survey_last_rx_clear_count;
1345 1671
1346 survey = &ar->survey[idx]; 1672 survey = &ar->survey[idx];
1347 survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count); 1673 survey->time = WMI_CHAN_INFO_MSEC(cycle_count);
1348 survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count); 1674 survey->time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
1349 survey->noise = noise_floor; 1675 survey->noise = noise_floor;
1350 survey->filled = SURVEY_INFO_CHANNEL_TIME | 1676 survey->filled = SURVEY_INFO_TIME |
1351 SURVEY_INFO_CHANNEL_TIME_RX | 1677 SURVEY_INFO_TIME_RX |
1352 SURVEY_INFO_NOISE_DBM; 1678 SURVEY_INFO_NOISE_DBM;
1353 } 1679 }
1354 1680
@@ -1359,12 +1685,12 @@ exit:
1359 spin_unlock_bh(&ar->data_lock); 1685 spin_unlock_bh(&ar->data_lock);
1360} 1686}
1361 1687
1362static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) 1688void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
1363{ 1689{
1364 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n"); 1690 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
1365} 1691}
1366 1692
1367static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) 1693int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
1368{ 1694{
1369 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n", 1695 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
1370 skb->len); 1696 skb->len);
@@ -1374,12 +1700,9 @@ static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
1374 return 0; 1700 return 0;
1375} 1701}
1376 1702
1377static void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src, 1703void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
1378 struct ath10k_fw_stats_pdev *dst) 1704 struct ath10k_fw_stats_pdev *dst)
1379{ 1705{
1380 const struct wal_dbg_tx_stats *tx = &src->wal.tx;
1381 const struct wal_dbg_rx_stats *rx = &src->wal.rx;
1382
1383 dst->ch_noise_floor = __le32_to_cpu(src->chan_nf); 1706 dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
1384 dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count); 1707 dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
1385 dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count); 1708 dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
@@ -1387,57 +1710,76 @@ static void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src,
1387 dst->cycle_count = __le32_to_cpu(src->cycle_count); 1710 dst->cycle_count = __le32_to_cpu(src->cycle_count);
1388 dst->phy_err_count = __le32_to_cpu(src->phy_err_count); 1711 dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
1389 dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr); 1712 dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
1713}
1390 1714
1391 dst->comp_queued = __le32_to_cpu(tx->comp_queued); 1715void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
1392 dst->comp_delivered = __le32_to_cpu(tx->comp_delivered); 1716 struct ath10k_fw_stats_pdev *dst)
1393 dst->msdu_enqued = __le32_to_cpu(tx->msdu_enqued); 1717{
1394 dst->mpdu_enqued = __le32_to_cpu(tx->mpdu_enqued); 1718 dst->comp_queued = __le32_to_cpu(src->comp_queued);
1395 dst->wmm_drop = __le32_to_cpu(tx->wmm_drop); 1719 dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
1396 dst->local_enqued = __le32_to_cpu(tx->local_enqued); 1720 dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
1397 dst->local_freed = __le32_to_cpu(tx->local_freed); 1721 dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
1398 dst->hw_queued = __le32_to_cpu(tx->hw_queued); 1722 dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
1399 dst->hw_reaped = __le32_to_cpu(tx->hw_reaped); 1723 dst->local_enqued = __le32_to_cpu(src->local_enqued);
1400 dst->underrun = __le32_to_cpu(tx->underrun); 1724 dst->local_freed = __le32_to_cpu(src->local_freed);
1401 dst->tx_abort = __le32_to_cpu(tx->tx_abort); 1725 dst->hw_queued = __le32_to_cpu(src->hw_queued);
1402 dst->mpdus_requed = __le32_to_cpu(tx->mpdus_requed); 1726 dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
1403 dst->tx_ko = __le32_to_cpu(tx->tx_ko); 1727 dst->underrun = __le32_to_cpu(src->underrun);
1404 dst->data_rc = __le32_to_cpu(tx->data_rc); 1728 dst->tx_abort = __le32_to_cpu(src->tx_abort);
1405 dst->self_triggers = __le32_to_cpu(tx->self_triggers); 1729 dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
1406 dst->sw_retry_failure = __le32_to_cpu(tx->sw_retry_failure); 1730 dst->tx_ko = __le32_to_cpu(src->tx_ko);
1407 dst->illgl_rate_phy_err = __le32_to_cpu(tx->illgl_rate_phy_err); 1731 dst->data_rc = __le32_to_cpu(src->data_rc);
1408 dst->pdev_cont_xretry = __le32_to_cpu(tx->pdev_cont_xretry); 1732 dst->self_triggers = __le32_to_cpu(src->self_triggers);
1409 dst->pdev_tx_timeout = __le32_to_cpu(tx->pdev_tx_timeout); 1733 dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
1410 dst->pdev_resets = __le32_to_cpu(tx->pdev_resets); 1734 dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
1411 dst->phy_underrun = __le32_to_cpu(tx->phy_underrun); 1735 dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
1412 dst->txop_ovf = __le32_to_cpu(tx->txop_ovf); 1736 dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
1413 1737 dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
1414 dst->mid_ppdu_route_change = __le32_to_cpu(rx->mid_ppdu_route_change); 1738 dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
1415 dst->status_rcvd = __le32_to_cpu(rx->status_rcvd); 1739 dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
1416 dst->r0_frags = __le32_to_cpu(rx->r0_frags); 1740}
1417 dst->r1_frags = __le32_to_cpu(rx->r1_frags); 1741
1418 dst->r2_frags = __le32_to_cpu(rx->r2_frags); 1742void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
1419 dst->r3_frags = __le32_to_cpu(rx->r3_frags); 1743 struct ath10k_fw_stats_pdev *dst)
1420 dst->htt_msdus = __le32_to_cpu(rx->htt_msdus); 1744{
1421 dst->htt_mpdus = __le32_to_cpu(rx->htt_mpdus); 1745 dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
1422 dst->loc_msdus = __le32_to_cpu(rx->loc_msdus); 1746 dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
1423 dst->loc_mpdus = __le32_to_cpu(rx->loc_mpdus); 1747 dst->r0_frags = __le32_to_cpu(src->r0_frags);
1424 dst->oversize_amsdu = __le32_to_cpu(rx->oversize_amsdu); 1748 dst->r1_frags = __le32_to_cpu(src->r1_frags);
1425 dst->phy_errs = __le32_to_cpu(rx->phy_errs); 1749 dst->r2_frags = __le32_to_cpu(src->r2_frags);
1426 dst->phy_err_drop = __le32_to_cpu(rx->phy_err_drop); 1750 dst->r3_frags = __le32_to_cpu(src->r3_frags);
1427 dst->mpdu_errs = __le32_to_cpu(rx->mpdu_errs); 1751 dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
1428} 1752 dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
1429 1753 dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
1430static void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src, 1754 dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
1431 struct ath10k_fw_stats_peer *dst) 1755 dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
1756 dst->phy_errs = __le32_to_cpu(src->phy_errs);
1757 dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
1758 dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
1759}
1760
1761void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
1762 struct ath10k_fw_stats_pdev *dst)
1763{
1764 dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
1765 dst->rts_bad = __le32_to_cpu(src->rts_bad);
1766 dst->rts_good = __le32_to_cpu(src->rts_good);
1767 dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
1768 dst->no_beacons = __le32_to_cpu(src->no_beacons);
1769 dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
1770}
1771
1772void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
1773 struct ath10k_fw_stats_peer *dst)
1432{ 1774{
1433 ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); 1775 ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
1434 dst->peer_rssi = __le32_to_cpu(src->peer_rssi); 1776 dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
1435 dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate); 1777 dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
1436} 1778}
1437 1779
1438static int ath10k_wmi_main_pull_fw_stats(struct ath10k *ar, 1780static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
1439 struct sk_buff *skb, 1781 struct sk_buff *skb,
1440 struct ath10k_fw_stats *stats) 1782 struct ath10k_fw_stats *stats)
1441{ 1783{
1442 const struct wmi_stats_event *ev = (void *)skb->data; 1784 const struct wmi_stats_event *ev = (void *)skb->data;
1443 u32 num_pdev_stats, num_vdev_stats, num_peer_stats; 1785 u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
@@ -1462,7 +1804,10 @@ static int ath10k_wmi_main_pull_fw_stats(struct ath10k *ar,
1462 if (!dst) 1804 if (!dst)
1463 continue; 1805 continue;
1464 1806
1465 ath10k_wmi_pull_pdev_stats(src, dst); 1807 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
1808 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
1809 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
1810
1466 list_add_tail(&dst->list, &stats->pdevs); 1811 list_add_tail(&dst->list, &stats->pdevs);
1467 } 1812 }
1468 1813
@@ -1487,9 +1832,9 @@ static int ath10k_wmi_main_pull_fw_stats(struct ath10k *ar,
1487 return 0; 1832 return 0;
1488} 1833}
1489 1834
1490static int ath10k_wmi_10x_pull_fw_stats(struct ath10k *ar, 1835static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
1491 struct sk_buff *skb, 1836 struct sk_buff *skb,
1492 struct ath10k_fw_stats *stats) 1837 struct ath10k_fw_stats *stats)
1493{ 1838{
1494 const struct wmi_stats_event *ev = (void *)skb->data; 1839 const struct wmi_stats_event *ev = (void *)skb->data;
1495 u32 num_pdev_stats, num_vdev_stats, num_peer_stats; 1840 u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
@@ -1514,14 +1859,10 @@ static int ath10k_wmi_10x_pull_fw_stats(struct ath10k *ar,
1514 if (!dst) 1859 if (!dst)
1515 continue; 1860 continue;
1516 1861
1517 ath10k_wmi_pull_pdev_stats(&src->old, dst); 1862 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
1518 1863 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
1519 dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad); 1864 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
1520 dst->rts_bad = __le32_to_cpu(src->rts_bad); 1865 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
1521 dst->rts_good = __le32_to_cpu(src->rts_good);
1522 dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
1523 dst->no_beacons = __le32_to_cpu(src->no_beacons);
1524 dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
1525 1866
1526 list_add_tail(&dst->list, &stats->pdevs); 1867 list_add_tail(&dst->list, &stats->pdevs);
1527 } 1868 }
@@ -1550,61 +1891,250 @@ static int ath10k_wmi_10x_pull_fw_stats(struct ath10k *ar,
1550 return 0; 1891 return 0;
1551} 1892}
1552 1893
1553int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, 1894static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
1554 struct ath10k_fw_stats *stats) 1895 struct sk_buff *skb,
1896 struct ath10k_fw_stats *stats)
1555{ 1897{
1556 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) 1898 const struct wmi_10_2_stats_event *ev = (void *)skb->data;
1557 return ath10k_wmi_10x_pull_fw_stats(ar, skb, stats); 1899 u32 num_pdev_stats;
1558 else 1900 u32 num_pdev_ext_stats;
1559 return ath10k_wmi_main_pull_fw_stats(ar, skb, stats); 1901 u32 num_vdev_stats;
1902 u32 num_peer_stats;
1903 int i;
1904
1905 if (!skb_pull(skb, sizeof(*ev)))
1906 return -EPROTO;
1907
1908 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
1909 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
1910 num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
1911 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
1912
1913 for (i = 0; i < num_pdev_stats; i++) {
1914 const struct wmi_10_2_pdev_stats *src;
1915 struct ath10k_fw_stats_pdev *dst;
1916
1917 src = (void *)skb->data;
1918 if (!skb_pull(skb, sizeof(*src)))
1919 return -EPROTO;
1920
1921 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1922 if (!dst)
1923 continue;
1924
1925 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
1926 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
1927 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
1928 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
1929 /* FIXME: expose 10.2 specific values */
1930
1931 list_add_tail(&dst->list, &stats->pdevs);
1932 }
1933
1934 for (i = 0; i < num_pdev_ext_stats; i++) {
1935 const struct wmi_10_2_pdev_ext_stats *src;
1936
1937 src = (void *)skb->data;
1938 if (!skb_pull(skb, sizeof(*src)))
1939 return -EPROTO;
1940
1941 /* FIXME: expose values to userspace
1942 *
1943 * Note: Even though this loop seems to do nothing it is
1944 * required to parse following sub-structures properly.
1945 */
1946 }
1947
1948 /* fw doesn't implement vdev stats */
1949
1950 for (i = 0; i < num_peer_stats; i++) {
1951 const struct wmi_10_2_peer_stats *src;
1952 struct ath10k_fw_stats_peer *dst;
1953
1954 src = (void *)skb->data;
1955 if (!skb_pull(skb, sizeof(*src)))
1956 return -EPROTO;
1957
1958 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1959 if (!dst)
1960 continue;
1961
1962 ath10k_wmi_pull_peer_stats(&src->old, dst);
1963
1964 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
1965 /* FIXME: expose 10.2 specific values */
1966
1967 list_add_tail(&dst->list, &stats->peers);
1968 }
1969
1970 return 0;
1971}
1972
1973static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
1974 struct sk_buff *skb,
1975 struct ath10k_fw_stats *stats)
1976{
1977 const struct wmi_10_2_stats_event *ev = (void *)skb->data;
1978 u32 num_pdev_stats;
1979 u32 num_pdev_ext_stats;
1980 u32 num_vdev_stats;
1981 u32 num_peer_stats;
1982 int i;
1983
1984 if (!skb_pull(skb, sizeof(*ev)))
1985 return -EPROTO;
1986
1987 num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
1988 num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
1989 num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
1990 num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
1991
1992 for (i = 0; i < num_pdev_stats; i++) {
1993 const struct wmi_10_2_pdev_stats *src;
1994 struct ath10k_fw_stats_pdev *dst;
1995
1996 src = (void *)skb->data;
1997 if (!skb_pull(skb, sizeof(*src)))
1998 return -EPROTO;
1999
2000 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
2001 if (!dst)
2002 continue;
2003
2004 ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
2005 ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
2006 ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
2007 ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
2008 /* FIXME: expose 10.2 specific values */
2009
2010 list_add_tail(&dst->list, &stats->pdevs);
2011 }
2012
2013 for (i = 0; i < num_pdev_ext_stats; i++) {
2014 const struct wmi_10_2_pdev_ext_stats *src;
2015
2016 src = (void *)skb->data;
2017 if (!skb_pull(skb, sizeof(*src)))
2018 return -EPROTO;
2019
2020 /* FIXME: expose values to userspace
2021 *
2022 * Note: Even though this loop seems to do nothing it is
2023 * required to parse following sub-structures properly.
2024 */
2025 }
2026
2027 /* fw doesn't implement vdev stats */
2028
2029 for (i = 0; i < num_peer_stats; i++) {
2030 const struct wmi_10_2_4_peer_stats *src;
2031 struct ath10k_fw_stats_peer *dst;
2032
2033 src = (void *)skb->data;
2034 if (!skb_pull(skb, sizeof(*src)))
2035 return -EPROTO;
2036
2037 dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
2038 if (!dst)
2039 continue;
2040
2041 ath10k_wmi_pull_peer_stats(&src->common.old, dst);
2042
2043 dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
2044 /* FIXME: expose 10.2 specific values */
2045
2046 list_add_tail(&dst->list, &stats->peers);
2047 }
2048
2049 return 0;
1560} 2050}
1561 2051
1562static void ath10k_wmi_event_update_stats(struct ath10k *ar, 2052void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
1563 struct sk_buff *skb)
1564{ 2053{
1565 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); 2054 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
1566 ath10k_debug_fw_stats_process(ar, skb); 2055 ath10k_debug_fw_stats_process(ar, skb);
1567} 2056}
1568 2057
1569static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, 2058static int
1570 struct sk_buff *skb) 2059ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
2060 struct wmi_vdev_start_ev_arg *arg)
2061{
2062 struct wmi_vdev_start_response_event *ev = (void *)skb->data;
2063
2064 if (skb->len < sizeof(*ev))
2065 return -EPROTO;
2066
2067 skb_pull(skb, sizeof(*ev));
2068 arg->vdev_id = ev->vdev_id;
2069 arg->req_id = ev->req_id;
2070 arg->resp_type = ev->resp_type;
2071 arg->status = ev->status;
2072
2073 return 0;
2074}
2075
2076void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
1571{ 2077{
1572 struct wmi_vdev_start_response_event *ev; 2078 struct wmi_vdev_start_ev_arg arg = {};
2079 int ret;
1573 2080
1574 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); 2081 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
1575 2082
1576 ev = (struct wmi_vdev_start_response_event *)skb->data; 2083 ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
2084 if (ret) {
2085 ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
2086 return;
2087 }
1577 2088
1578 if (WARN_ON(__le32_to_cpu(ev->status))) 2089 if (WARN_ON(__le32_to_cpu(arg.status)))
1579 return; 2090 return;
1580 2091
1581 complete(&ar->vdev_setup_done); 2092 complete(&ar->vdev_setup_done);
1582} 2093}
1583 2094
1584static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, 2095void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
1585 struct sk_buff *skb)
1586{ 2096{
1587 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n"); 2097 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
1588 complete(&ar->vdev_setup_done); 2098 complete(&ar->vdev_setup_done);
1589} 2099}
1590 2100
1591static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, 2101static int
1592 struct sk_buff *skb) 2102ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
2103 struct wmi_peer_kick_ev_arg *arg)
1593{ 2104{
1594 struct wmi_peer_sta_kickout_event *ev; 2105 struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
2106
2107 if (skb->len < sizeof(*ev))
2108 return -EPROTO;
2109
2110 skb_pull(skb, sizeof(*ev));
2111 arg->mac_addr = ev->peer_macaddr.addr;
2112
2113 return 0;
2114}
2115
2116void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
2117{
2118 struct wmi_peer_kick_ev_arg arg = {};
1595 struct ieee80211_sta *sta; 2119 struct ieee80211_sta *sta;
2120 int ret;
1596 2121
1597 ev = (struct wmi_peer_sta_kickout_event *)skb->data; 2122 ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
2123 if (ret) {
2124 ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
2125 ret);
2126 return;
2127 }
1598 2128
1599 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n", 2129 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
1600 ev->peer_macaddr.addr); 2130 arg.mac_addr);
1601 2131
1602 rcu_read_lock(); 2132 rcu_read_lock();
1603 2133
1604 sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL); 2134 sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
1605 if (!sta) { 2135 if (!sta) {
1606 ath10k_warn(ar, "Spurious quick kickout for STA %pM\n", 2136 ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
1607 ev->peer_macaddr.addr); 2137 arg.mac_addr);
1608 goto exit; 2138 goto exit;
1609 } 2139 }
1610 2140
@@ -1641,7 +2171,7 @@ exit:
1641static void ath10k_wmi_update_tim(struct ath10k *ar, 2171static void ath10k_wmi_update_tim(struct ath10k *ar,
1642 struct ath10k_vif *arvif, 2172 struct ath10k_vif *arvif,
1643 struct sk_buff *bcn, 2173 struct sk_buff *bcn,
1644 struct wmi_bcn_info *bcn_info) 2174 const struct wmi_tim_info *tim_info)
1645{ 2175{
1646 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data; 2176 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
1647 struct ieee80211_tim_ie *tim; 2177 struct ieee80211_tim_ie *tim;
@@ -1652,14 +2182,14 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
1652 2182
1653 /* if next SWBA has no tim_changed the tim_bitmap is garbage. 2183 /* if next SWBA has no tim_changed the tim_bitmap is garbage.
1654 * we must copy the bitmap upon change and reuse it later */ 2184 * we must copy the bitmap upon change and reuse it later */
1655 if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) { 2185 if (__le32_to_cpu(tim_info->tim_changed)) {
1656 int i; 2186 int i;
1657 2187
1658 BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) != 2188 BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
1659 sizeof(bcn_info->tim_info.tim_bitmap)); 2189 sizeof(tim_info->tim_bitmap));
1660 2190
1661 for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) { 2191 for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
1662 t = bcn_info->tim_info.tim_bitmap[i / 4]; 2192 t = tim_info->tim_bitmap[i / 4];
1663 v = __le32_to_cpu(t); 2193 v = __le32_to_cpu(t);
1664 arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF; 2194 arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
1665 } 2195 }
@@ -1711,13 +2241,13 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
1711 return; 2241 return;
1712 } 2242 }
1713 2243
1714 tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast); 2244 tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
1715 memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len); 2245 memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
1716 2246
1717 if (tim->dtim_count == 0) { 2247 if (tim->dtim_count == 0) {
1718 ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true; 2248 ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
1719 2249
1720 if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1) 2250 if (__le32_to_cpu(tim_info->tim_mcast) == 1)
1721 ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true; 2251 ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
1722 } 2252 }
1723 2253
@@ -1727,7 +2257,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
1727} 2257}
1728 2258
1729static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len, 2259static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
1730 struct wmi_p2p_noa_info *noa) 2260 const struct wmi_p2p_noa_info *noa)
1731{ 2261{
1732 struct ieee80211_p2p_noa_attr *noa_attr; 2262 struct ieee80211_p2p_noa_attr *noa_attr;
1733 u8 ctwindow_oppps = noa->ctwindow_oppps; 2263 u8 ctwindow_oppps = noa->ctwindow_oppps;
@@ -1769,7 +2299,7 @@ static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
1769 *noa_attr_len = __cpu_to_le16(attr_len); 2299 *noa_attr_len = __cpu_to_le16(attr_len);
1770} 2300}
1771 2301
1772static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa) 2302static u32 ath10k_p2p_calc_noa_ie_len(const struct wmi_p2p_noa_info *noa)
1773{ 2303{
1774 u32 len = 0; 2304 u32 len = 0;
1775 u8 noa_descriptors = noa->num_descriptors; 2305 u8 noa_descriptors = noa->num_descriptors;
@@ -1789,9 +2319,8 @@ static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
1789 2319
1790static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, 2320static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
1791 struct sk_buff *bcn, 2321 struct sk_buff *bcn,
1792 struct wmi_bcn_info *bcn_info) 2322 const struct wmi_p2p_noa_info *noa)
1793{ 2323{
1794 struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
1795 u8 *new_data, *old_data = arvif->u.ap.noa_data; 2324 u8 *new_data, *old_data = arvif->u.ap.noa_data;
1796 u32 new_len; 2325 u32 new_len;
1797 2326
@@ -1832,22 +2361,59 @@ cleanup:
1832 kfree(old_data); 2361 kfree(old_data);
1833} 2362}
1834 2363
1835static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) 2364static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
2365 struct wmi_swba_ev_arg *arg)
1836{ 2366{
1837 struct wmi_host_swba_event *ev; 2367 struct wmi_host_swba_event *ev = (void *)skb->data;
2368 u32 map;
2369 size_t i;
2370
2371 if (skb->len < sizeof(*ev))
2372 return -EPROTO;
2373
2374 skb_pull(skb, sizeof(*ev));
2375 arg->vdev_map = ev->vdev_map;
2376
2377 for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
2378 if (!(map & BIT(0)))
2379 continue;
2380
2381 /* If this happens there were some changes in firmware and
2382 * ath10k should update the max size of tim_info array.
2383 */
2384 if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
2385 break;
2386
2387 arg->tim_info[i] = &ev->bcn_info[i].tim_info;
2388 arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
2389 i++;
2390 }
2391
2392 return 0;
2393}
2394
2395void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
2396{
2397 struct wmi_swba_ev_arg arg = {};
1838 u32 map; 2398 u32 map;
1839 int i = -1; 2399 int i = -1;
1840 struct wmi_bcn_info *bcn_info; 2400 const struct wmi_tim_info *tim_info;
2401 const struct wmi_p2p_noa_info *noa_info;
1841 struct ath10k_vif *arvif; 2402 struct ath10k_vif *arvif;
1842 struct sk_buff *bcn; 2403 struct sk_buff *bcn;
1843 dma_addr_t paddr; 2404 dma_addr_t paddr;
1844 int ret, vdev_id = 0; 2405 int ret, vdev_id = 0;
1845 2406
1846 ev = (struct wmi_host_swba_event *)skb->data; 2407 ret = ath10k_wmi_pull_swba(ar, skb, &arg);
1847 map = __le32_to_cpu(ev->vdev_map); 2408 if (ret) {
2409 ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
2410 return;
2411 }
2412
2413 map = __le32_to_cpu(arg.vdev_map);
1848 2414
1849 ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n", 2415 ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
1850 ev->vdev_map); 2416 map);
1851 2417
1852 for (; map; map >>= 1, vdev_id++) { 2418 for (; map; map >>= 1, vdev_id++) {
1853 if (!(map & 0x1)) 2419 if (!(map & 0x1))
@@ -1860,19 +2426,20 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1860 break; 2426 break;
1861 } 2427 }
1862 2428
1863 bcn_info = &ev->bcn_info[i]; 2429 tim_info = arg.tim_info[i];
2430 noa_info = arg.noa_info[i];
1864 2431
1865 ath10k_dbg(ar, ATH10K_DBG_MGMT, 2432 ath10k_dbg(ar, ATH10K_DBG_MGMT,
1866 "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n", 2433 "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
1867 i, 2434 i,
1868 __le32_to_cpu(bcn_info->tim_info.tim_len), 2435 __le32_to_cpu(tim_info->tim_len),
1869 __le32_to_cpu(bcn_info->tim_info.tim_mcast), 2436 __le32_to_cpu(tim_info->tim_mcast),
1870 __le32_to_cpu(bcn_info->tim_info.tim_changed), 2437 __le32_to_cpu(tim_info->tim_changed),
1871 __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending), 2438 __le32_to_cpu(tim_info->tim_num_ps_pending),
1872 __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]), 2439 __le32_to_cpu(tim_info->tim_bitmap[3]),
1873 __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]), 2440 __le32_to_cpu(tim_info->tim_bitmap[2]),
1874 __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]), 2441 __le32_to_cpu(tim_info->tim_bitmap[1]),
1875 __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0])); 2442 __le32_to_cpu(tim_info->tim_bitmap[0]));
1876 2443
1877 arvif = ath10k_get_arvif(ar, vdev_id); 2444 arvif = ath10k_get_arvif(ar, vdev_id);
1878 if (arvif == NULL) { 2445 if (arvif == NULL) {
@@ -1899,15 +2466,25 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1899 } 2466 }
1900 2467
1901 ath10k_tx_h_seq_no(arvif->vif, bcn); 2468 ath10k_tx_h_seq_no(arvif->vif, bcn);
1902 ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info); 2469 ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
1903 ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info); 2470 ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
1904 2471
1905 spin_lock_bh(&ar->data_lock); 2472 spin_lock_bh(&ar->data_lock);
1906 2473
1907 if (arvif->beacon) { 2474 if (arvif->beacon) {
1908 if (!arvif->beacon_sent) 2475 switch (arvif->beacon_state) {
1909 ath10k_warn(ar, "SWBA overrun on vdev %d\n", 2476 case ATH10K_BEACON_SENT:
2477 break;
2478 case ATH10K_BEACON_SCHEDULED:
2479 ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
2480 arvif->vdev_id);
2481 break;
2482 case ATH10K_BEACON_SENDING:
2483 ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
1910 arvif->vdev_id); 2484 arvif->vdev_id);
2485 dev_kfree_skb(bcn);
2486 goto skip;
2487 }
1911 2488
1912 ath10k_mac_vif_beacon_free(arvif); 2489 ath10k_mac_vif_beacon_free(arvif);
1913 } 2490 }
@@ -1935,19 +2512,19 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
1935 } 2512 }
1936 2513
1937 arvif->beacon = bcn; 2514 arvif->beacon = bcn;
1938 arvif->beacon_sent = false; 2515 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
1939 2516
1940 trace_ath10k_tx_hdr(ar, bcn->data, bcn->len); 2517 trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
1941 trace_ath10k_tx_payload(ar, bcn->data, bcn->len); 2518 trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
1942 2519
1943 ath10k_wmi_tx_beacon_nowait(arvif);
1944skip: 2520skip:
1945 spin_unlock_bh(&ar->data_lock); 2521 spin_unlock_bh(&ar->data_lock);
1946 } 2522 }
2523
2524 ath10k_wmi_tx_beacons_nowait(ar);
1947} 2525}
1948 2526
1949static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, 2527void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
1950 struct sk_buff *skb)
1951{ 2528{
1952 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); 2529 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
1953} 2530}
@@ -2068,9 +2645,9 @@ static int ath10k_dfs_fft_report(struct ath10k *ar,
2068 return 0; 2645 return 0;
2069} 2646}
2070 2647
2071static void ath10k_wmi_event_dfs(struct ath10k *ar, 2648void ath10k_wmi_event_dfs(struct ath10k *ar,
2072 const struct wmi_phyerr *phyerr, 2649 const struct wmi_phyerr *phyerr,
2073 u64 tsf) 2650 u64 tsf)
2074{ 2651{
2075 int buf_len, tlv_len, res, i = 0; 2652 int buf_len, tlv_len, res, i = 0;
2076 const struct phyerr_tlv *tlv; 2653 const struct phyerr_tlv *tlv;
@@ -2133,10 +2710,9 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
2133 } 2710 }
2134} 2711}
2135 2712
2136static void 2713void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
2137ath10k_wmi_event_spectral_scan(struct ath10k *ar, 2714 const struct wmi_phyerr *phyerr,
2138 const struct wmi_phyerr *phyerr, 2715 u64 tsf)
2139 u64 tsf)
2140{ 2716{
2141 int buf_len, tlv_len, res, i = 0; 2717 int buf_len, tlv_len, res, i = 0;
2142 struct phyerr_tlv *tlv; 2718 struct phyerr_tlv *tlv;
@@ -2188,37 +2764,53 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar,
2188 } 2764 }
2189} 2765}
2190 2766
2191static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) 2767static int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, struct sk_buff *skb,
2768 struct wmi_phyerr_ev_arg *arg)
2192{ 2769{
2193 const struct wmi_phyerr_event *ev; 2770 struct wmi_phyerr_event *ev = (void *)skb->data;
2771
2772 if (skb->len < sizeof(*ev))
2773 return -EPROTO;
2774
2775 arg->num_phyerrs = ev->num_phyerrs;
2776 arg->tsf_l32 = ev->tsf_l32;
2777 arg->tsf_u32 = ev->tsf_u32;
2778 arg->buf_len = __cpu_to_le32(skb->len - sizeof(*ev));
2779 arg->phyerrs = ev->phyerrs;
2780
2781 return 0;
2782}
2783
2784void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
2785{
2786 struct wmi_phyerr_ev_arg arg = {};
2194 const struct wmi_phyerr *phyerr; 2787 const struct wmi_phyerr *phyerr;
2195 u32 count, i, buf_len, phy_err_code; 2788 u32 count, i, buf_len, phy_err_code;
2196 u64 tsf; 2789 u64 tsf;
2197 int left_len = skb->len; 2790 int left_len, ret;
2198 2791
2199 ATH10K_DFS_STAT_INC(ar, phy_errors); 2792 ATH10K_DFS_STAT_INC(ar, phy_errors);
2200 2793
2201 /* Check if combined event available */ 2794 ret = ath10k_wmi_pull_phyerr(ar, skb, &arg);
2202 if (left_len < sizeof(*ev)) { 2795 if (ret) {
2203 ath10k_warn(ar, "wmi phyerr combined event wrong len\n"); 2796 ath10k_warn(ar, "failed to parse phyerr event: %d\n", ret);
2204 return; 2797 return;
2205 } 2798 }
2206 2799
2207 left_len -= sizeof(*ev); 2800 left_len = __le32_to_cpu(arg.buf_len);
2208 2801
2209 /* Check number of included events */ 2802 /* Check number of included events */
2210 ev = (const struct wmi_phyerr_event *)skb->data; 2803 count = __le32_to_cpu(arg.num_phyerrs);
2211 count = __le32_to_cpu(ev->num_phyerrs);
2212 2804
2213 tsf = __le32_to_cpu(ev->tsf_u32); 2805 tsf = __le32_to_cpu(arg.tsf_u32);
2214 tsf <<= 32; 2806 tsf <<= 32;
2215 tsf |= __le32_to_cpu(ev->tsf_l32); 2807 tsf |= __le32_to_cpu(arg.tsf_l32);
2216 2808
2217 ath10k_dbg(ar, ATH10K_DBG_WMI, 2809 ath10k_dbg(ar, ATH10K_DBG_WMI,
2218 "wmi event phyerr count %d tsf64 0x%llX\n", 2810 "wmi event phyerr count %d tsf64 0x%llX\n",
2219 count, tsf); 2811 count, tsf);
2220 2812
2221 phyerr = ev->phyerrs; 2813 phyerr = arg.phyerrs;
2222 for (i = 0; i < count; i++) { 2814 for (i = 0; i < count; i++) {
2223 /* Check if we can read event header */ 2815 /* Check if we can read event header */
2224 if (left_len < sizeof(*phyerr)) { 2816 if (left_len < sizeof(*phyerr)) {
@@ -2258,19 +2850,17 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
2258 } 2850 }
2259} 2851}
2260 2852
2261static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) 2853void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
2262{ 2854{
2263 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n"); 2855 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
2264} 2856}
2265 2857
2266static void ath10k_wmi_event_profile_match(struct ath10k *ar, 2858void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
2267 struct sk_buff *skb)
2268{ 2859{
2269 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); 2860 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
2270} 2861}
2271 2862
2272static void ath10k_wmi_event_debug_print(struct ath10k *ar, 2863void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
2273 struct sk_buff *skb)
2274{ 2864{
2275 char buf[101], c; 2865 char buf[101], c;
2276 int i; 2866 int i;
@@ -2303,103 +2893,90 @@ static void ath10k_wmi_event_debug_print(struct ath10k *ar,
2303 ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf); 2893 ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
2304} 2894}
2305 2895
2306static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) 2896void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
2307{ 2897{
2308 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); 2898 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
2309} 2899}
2310 2900
2311static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, 2901void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
2312 struct sk_buff *skb)
2313{ 2902{
2314 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); 2903 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
2315} 2904}
2316 2905
2317static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar, 2906void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
2318 struct sk_buff *skb) 2907 struct sk_buff *skb)
2319{ 2908{
2320 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); 2909 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
2321} 2910}
2322 2911
2323static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar, 2912void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
2324 struct sk_buff *skb) 2913 struct sk_buff *skb)
2325{ 2914{
2326 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); 2915 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
2327} 2916}
2328 2917
2329static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, 2918void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
2330 struct sk_buff *skb)
2331{ 2919{
2332 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); 2920 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
2333} 2921}
2334 2922
2335static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, 2923void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
2336 struct sk_buff *skb)
2337{ 2924{
2338 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n"); 2925 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
2339} 2926}
2340 2927
2341static void ath10k_wmi_event_dcs_interference(struct ath10k *ar, 2928void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
2342 struct sk_buff *skb)
2343{ 2929{
2344 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); 2930 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
2345} 2931}
2346 2932
2347static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, 2933void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
2348 struct sk_buff *skb)
2349{ 2934{
2350 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n"); 2935 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
2351} 2936}
2352 2937
2353static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, 2938void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
2354 struct sk_buff *skb)
2355{ 2939{
2356 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); 2940 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
2357} 2941}
2358 2942
2359static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, 2943void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
2360 struct sk_buff *skb)
2361{ 2944{
2362 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); 2945 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
2363} 2946}
2364 2947
2365static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, 2948void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
2366 struct sk_buff *skb)
2367{ 2949{
2368 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); 2950 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
2369} 2951}
2370 2952
2371static void ath10k_wmi_event_delba_complete(struct ath10k *ar, 2953void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
2372 struct sk_buff *skb)
2373{ 2954{
2374 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); 2955 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
2375} 2956}
2376 2957
2377static void ath10k_wmi_event_addba_complete(struct ath10k *ar, 2958void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
2378 struct sk_buff *skb)
2379{ 2959{
2380 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); 2960 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
2381} 2961}
2382 2962
2383static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, 2963void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
2384 struct sk_buff *skb) 2964 struct sk_buff *skb)
2385{ 2965{
2386 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); 2966 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
2387} 2967}
2388 2968
2389static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, 2969void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
2390 struct sk_buff *skb)
2391{ 2970{
2392 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n"); 2971 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
2393} 2972}
2394 2973
2395static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, 2974void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
2396 struct sk_buff *skb)
2397{ 2975{
2398 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n"); 2976 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
2399} 2977}
2400 2978
2401static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, 2979void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
2402 struct sk_buff *skb)
2403{ 2980{
2404 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); 2981 ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
2405} 2982}
@@ -2435,8 +3012,9 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
2435 return 0; 3012 return 0;
2436} 3013}
2437 3014
2438static int ath10k_wmi_main_pull_svc_rdy_ev(struct sk_buff *skb, 3015static int
2439 struct wmi_svc_rdy_ev_arg *arg) 3016ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
3017 struct wmi_svc_rdy_ev_arg *arg)
2440{ 3018{
2441 struct wmi_service_ready_event *ev; 3019 struct wmi_service_ready_event *ev;
2442 size_t i, n; 3020 size_t i, n;
@@ -2471,8 +3049,9 @@ static int ath10k_wmi_main_pull_svc_rdy_ev(struct sk_buff *skb,
2471 return 0; 3049 return 0;
2472} 3050}
2473 3051
2474static int ath10k_wmi_10x_pull_svc_rdy_ev(struct sk_buff *skb, 3052static int
2475 struct wmi_svc_rdy_ev_arg *arg) 3053ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
3054 struct wmi_svc_rdy_ev_arg *arg)
2476{ 3055{
2477 struct wmi_10x_service_ready_event *ev; 3056 struct wmi_10x_service_ready_event *ev;
2478 int i, n; 3057 int i, n;
@@ -2506,30 +3085,22 @@ static int ath10k_wmi_10x_pull_svc_rdy_ev(struct sk_buff *skb,
2506 return 0; 3085 return 0;
2507} 3086}
2508 3087
2509static void ath10k_wmi_event_service_ready(struct ath10k *ar, 3088void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
2510 struct sk_buff *skb)
2511{ 3089{
2512 struct wmi_svc_rdy_ev_arg arg = {}; 3090 struct wmi_svc_rdy_ev_arg arg = {};
2513 u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; 3091 u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
2514 int ret; 3092 int ret;
2515 3093
2516 memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map)); 3094 ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
2517
2518 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
2519 ret = ath10k_wmi_10x_pull_svc_rdy_ev(skb, &arg);
2520 wmi_10x_svc_map(arg.service_map, ar->wmi.svc_map,
2521 arg.service_map_len);
2522 } else {
2523 ret = ath10k_wmi_main_pull_svc_rdy_ev(skb, &arg);
2524 wmi_main_svc_map(arg.service_map, ar->wmi.svc_map,
2525 arg.service_map_len);
2526 }
2527
2528 if (ret) { 3095 if (ret) {
2529 ath10k_warn(ar, "failed to parse service ready: %d\n", ret); 3096 ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
2530 return; 3097 return;
2531 } 3098 }
2532 3099
3100 memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
3101 ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
3102 arg.service_map_len);
3103
2533 ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power); 3104 ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
2534 ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power); 3105 ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
2535 ar->ht_cap_info = __le32_to_cpu(arg.ht_cap); 3106 ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
@@ -2607,13 +3178,14 @@ static void ath10k_wmi_event_service_ready(struct ath10k *ar,
2607 } 3178 }
2608 3179
2609 ath10k_dbg(ar, ATH10K_DBG_WMI, 3180 ath10k_dbg(ar, ATH10K_DBG_WMI,
2610 "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n", 3181 "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
2611 __le32_to_cpu(arg.min_tx_power), 3182 __le32_to_cpu(arg.min_tx_power),
2612 __le32_to_cpu(arg.max_tx_power), 3183 __le32_to_cpu(arg.max_tx_power),
2613 __le32_to_cpu(arg.ht_cap), 3184 __le32_to_cpu(arg.ht_cap),
2614 __le32_to_cpu(arg.vht_cap), 3185 __le32_to_cpu(arg.vht_cap),
2615 __le32_to_cpu(arg.sw_ver0), 3186 __le32_to_cpu(arg.sw_ver0),
2616 __le32_to_cpu(arg.sw_ver1), 3187 __le32_to_cpu(arg.sw_ver1),
3188 __le32_to_cpu(arg.fw_build),
2617 __le32_to_cpu(arg.phy_capab), 3189 __le32_to_cpu(arg.phy_capab),
2618 __le32_to_cpu(arg.num_rf_chains), 3190 __le32_to_cpu(arg.num_rf_chains),
2619 __le32_to_cpu(arg.eeprom_rd), 3191 __le32_to_cpu(arg.eeprom_rd),
@@ -2622,27 +3194,59 @@ static void ath10k_wmi_event_service_ready(struct ath10k *ar,
2622 complete(&ar->wmi.service_ready); 3194 complete(&ar->wmi.service_ready);
2623} 3195}
2624 3196
2625static int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb) 3197static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
3198 struct wmi_rdy_ev_arg *arg)
2626{ 3199{
2627 struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data; 3200 struct wmi_ready_event *ev = (void *)skb->data;
2628 3201
2629 if (WARN_ON(skb->len < sizeof(*ev))) 3202 if (skb->len < sizeof(*ev))
2630 return -EINVAL; 3203 return -EPROTO;
2631 3204
2632 ether_addr_copy(ar->mac_addr, ev->mac_addr.addr); 3205 skb_pull(skb, sizeof(*ev));
3206 arg->sw_version = ev->sw_version;
3207 arg->abi_version = ev->abi_version;
3208 arg->status = ev->status;
3209 arg->mac_addr = ev->mac_addr.addr;
3210
3211 return 0;
3212}
3213
3214int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
3215{
3216 struct wmi_rdy_ev_arg arg = {};
3217 int ret;
3218
3219 ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
3220 if (ret) {
3221 ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
3222 return ret;
3223 }
2633 3224
2634 ath10k_dbg(ar, ATH10K_DBG_WMI, 3225 ath10k_dbg(ar, ATH10K_DBG_WMI,
2635 "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n", 3226 "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
2636 __le32_to_cpu(ev->sw_version), 3227 __le32_to_cpu(arg.sw_version),
2637 __le32_to_cpu(ev->abi_version), 3228 __le32_to_cpu(arg.abi_version),
2638 ev->mac_addr.addr, 3229 arg.mac_addr,
2639 __le32_to_cpu(ev->status), skb->len, sizeof(*ev)); 3230 __le32_to_cpu(arg.status));
2640 3231
3232 ether_addr_copy(ar->mac_addr, arg.mac_addr);
2641 complete(&ar->wmi.unified_ready); 3233 complete(&ar->wmi.unified_ready);
2642 return 0; 3234 return 0;
2643} 3235}
2644 3236
2645static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb) 3237static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
3238{
3239 const struct wmi_pdev_temperature_event *ev;
3240
3241 ev = (struct wmi_pdev_temperature_event *)skb->data;
3242 if (WARN_ON(skb->len < sizeof(*ev)))
3243 return -EPROTO;
3244
3245 ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
3246 return 0;
3247}
3248
3249static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
2646{ 3250{
2647 struct wmi_cmd_hdr *cmd_hdr; 3251 struct wmi_cmd_hdr *cmd_hdr;
2648 enum wmi_event_id id; 3252 enum wmi_event_id id;
@@ -2758,7 +3362,7 @@ static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
2758 dev_kfree_skb(skb); 3362 dev_kfree_skb(skb);
2759} 3363}
2760 3364
2761static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb) 3365static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
2762{ 3366{
2763 struct wmi_cmd_hdr *cmd_hdr; 3367 struct wmi_cmd_hdr *cmd_hdr;
2764 enum wmi_10x_event_id id; 3368 enum wmi_10x_event_id id;
@@ -2882,7 +3486,7 @@ out:
2882 dev_kfree_skb(skb); 3486 dev_kfree_skb(skb);
2883} 3487}
2884 3488
2885static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb) 3489static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
2886{ 3490{
2887 struct wmi_cmd_hdr *cmd_hdr; 3491 struct wmi_cmd_hdr *cmd_hdr;
2888 enum wmi_10_2_event_id id; 3492 enum wmi_10_2_event_id id;
@@ -2981,6 +3585,9 @@ static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb)
2981 case WMI_10_2_READY_EVENTID: 3585 case WMI_10_2_READY_EVENTID:
2982 ath10k_wmi_event_ready(ar, skb); 3586 ath10k_wmi_event_ready(ar, skb);
2983 break; 3587 break;
3588 case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
3589 ath10k_wmi_event_temperature(ar, skb);
3590 break;
2984 case WMI_10_2_RTT_KEEPALIVE_EVENTID: 3591 case WMI_10_2_RTT_KEEPALIVE_EVENTID:
2985 case WMI_10_2_GPIO_INPUT_EVENTID: 3592 case WMI_10_2_GPIO_INPUT_EVENTID:
2986 case WMI_10_2_PEER_RATECODE_LIST_EVENTID: 3593 case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
@@ -3001,14 +3608,11 @@ static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb)
3001 3608
3002static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) 3609static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
3003{ 3610{
3004 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { 3611 int ret;
3005 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) 3612
3006 ath10k_wmi_10_2_process_rx(ar, skb); 3613 ret = ath10k_wmi_rx(ar, skb);
3007 else 3614 if (ret)
3008 ath10k_wmi_10x_process_rx(ar, skb); 3615 ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
3009 } else {
3010 ath10k_wmi_main_process_rx(ar, skb);
3011 }
3012} 3616}
3013 3617
3014int ath10k_wmi_connect(struct ath10k *ar) 3618int ath10k_wmi_connect(struct ath10k *ar)
@@ -3039,16 +3643,17 @@ int ath10k_wmi_connect(struct ath10k *ar)
3039 return 0; 3643 return 0;
3040} 3644}
3041 3645
3042static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd, 3646static struct sk_buff *
3043 u16 rd2g, u16 rd5g, u16 ctl2g, 3647ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
3044 u16 ctl5g) 3648 u16 ctl2g, u16 ctl5g,
3649 enum wmi_dfs_region dfs_reg)
3045{ 3650{
3046 struct wmi_pdev_set_regdomain_cmd *cmd; 3651 struct wmi_pdev_set_regdomain_cmd *cmd;
3047 struct sk_buff *skb; 3652 struct sk_buff *skb;
3048 3653
3049 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 3654 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3050 if (!skb) 3655 if (!skb)
3051 return -ENOMEM; 3656 return ERR_PTR(-ENOMEM);
3052 3657
3053 cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data; 3658 cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
3054 cmd->reg_domain = __cpu_to_le32(rd); 3659 cmd->reg_domain = __cpu_to_le32(rd);
@@ -3060,22 +3665,20 @@ static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
3060 ath10k_dbg(ar, ATH10K_DBG_WMI, 3665 ath10k_dbg(ar, ATH10K_DBG_WMI,
3061 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n", 3666 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
3062 rd, rd2g, rd5g, ctl2g, ctl5g); 3667 rd, rd2g, rd5g, ctl2g, ctl5g);
3063 3668 return skb;
3064 return ath10k_wmi_cmd_send(ar, skb,
3065 ar->wmi.cmd->pdev_set_regdomain_cmdid);
3066} 3669}
3067 3670
3068static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd, 3671static struct sk_buff *
3069 u16 rd2g, u16 rd5g, 3672ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
3070 u16 ctl2g, u16 ctl5g, 3673 rd5g, u16 ctl2g, u16 ctl5g,
3071 enum wmi_dfs_region dfs_reg) 3674 enum wmi_dfs_region dfs_reg)
3072{ 3675{
3073 struct wmi_pdev_set_regdomain_cmd_10x *cmd; 3676 struct wmi_pdev_set_regdomain_cmd_10x *cmd;
3074 struct sk_buff *skb; 3677 struct sk_buff *skb;
3075 3678
3076 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 3679 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3077 if (!skb) 3680 if (!skb)
3078 return -ENOMEM; 3681 return ERR_PTR(-ENOMEM);
3079 3682
3080 cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data; 3683 cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
3081 cmd->reg_domain = __cpu_to_le32(rd); 3684 cmd->reg_domain = __cpu_to_le32(rd);
@@ -3088,50 +3691,39 @@ static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
3088 ath10k_dbg(ar, ATH10K_DBG_WMI, 3691 ath10k_dbg(ar, ATH10K_DBG_WMI,
3089 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n", 3692 "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
3090 rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg); 3693 rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
3091 3694 return skb;
3092 return ath10k_wmi_cmd_send(ar, skb,
3093 ar->wmi.cmd->pdev_set_regdomain_cmdid);
3094}
3095
3096int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
3097 u16 rd5g, u16 ctl2g, u16 ctl5g,
3098 enum wmi_dfs_region dfs_reg)
3099{
3100 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
3101 return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
3102 ctl2g, ctl5g, dfs_reg);
3103 else
3104 return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
3105 ctl2g, ctl5g);
3106} 3695}
3107 3696
3108int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt) 3697static struct sk_buff *
3698ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
3109{ 3699{
3110 struct wmi_pdev_suspend_cmd *cmd; 3700 struct wmi_pdev_suspend_cmd *cmd;
3111 struct sk_buff *skb; 3701 struct sk_buff *skb;
3112 3702
3113 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 3703 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3114 if (!skb) 3704 if (!skb)
3115 return -ENOMEM; 3705 return ERR_PTR(-ENOMEM);
3116 3706
3117 cmd = (struct wmi_pdev_suspend_cmd *)skb->data; 3707 cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
3118 cmd->suspend_opt = __cpu_to_le32(suspend_opt); 3708 cmd->suspend_opt = __cpu_to_le32(suspend_opt);
3119 3709
3120 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid); 3710 return skb;
3121} 3711}
3122 3712
3123int ath10k_wmi_pdev_resume_target(struct ath10k *ar) 3713static struct sk_buff *
3714ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
3124{ 3715{
3125 struct sk_buff *skb; 3716 struct sk_buff *skb;
3126 3717
3127 skb = ath10k_wmi_alloc_skb(ar, 0); 3718 skb = ath10k_wmi_alloc_skb(ar, 0);
3128 if (skb == NULL) 3719 if (!skb)
3129 return -ENOMEM; 3720 return ERR_PTR(-ENOMEM);
3130 3721
3131 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid); 3722 return skb;
3132} 3723}
3133 3724
3134int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value) 3725static struct sk_buff *
3726ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
3135{ 3727{
3136 struct wmi_pdev_set_param_cmd *cmd; 3728 struct wmi_pdev_set_param_cmd *cmd;
3137 struct sk_buff *skb; 3729 struct sk_buff *skb;
@@ -3139,12 +3731,12 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
3139 if (id == WMI_PDEV_PARAM_UNSUPPORTED) { 3731 if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
3140 ath10k_warn(ar, "pdev param %d not supported by firmware\n", 3732 ath10k_warn(ar, "pdev param %d not supported by firmware\n",
3141 id); 3733 id);
3142 return -EOPNOTSUPP; 3734 return ERR_PTR(-EOPNOTSUPP);
3143 } 3735 }
3144 3736
3145 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 3737 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3146 if (!skb) 3738 if (!skb)
3147 return -ENOMEM; 3739 return ERR_PTR(-ENOMEM);
3148 3740
3149 cmd = (struct wmi_pdev_set_param_cmd *)skb->data; 3741 cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
3150 cmd->param_id = __cpu_to_le32(id); 3742 cmd->param_id = __cpu_to_le32(id);
@@ -3152,11 +3744,11 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
3152 3744
3153 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", 3745 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
3154 id, value); 3746 id, value);
3155 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid); 3747 return skb;
3156} 3748}
3157 3749
3158static void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar, 3750void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
3159 struct wmi_host_mem_chunks *chunks) 3751 struct wmi_host_mem_chunks *chunks)
3160{ 3752{
3161 struct host_memory_chunk *chunk; 3753 struct host_memory_chunk *chunk;
3162 int i; 3754 int i;
@@ -3177,7 +3769,7 @@ static void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
3177 } 3769 }
3178} 3770}
3179 3771
3180static int ath10k_wmi_main_cmd_init(struct ath10k *ar) 3772static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
3181{ 3773{
3182 struct wmi_init_cmd *cmd; 3774 struct wmi_init_cmd *cmd;
3183 struct sk_buff *buf; 3775 struct sk_buff *buf;
@@ -3240,7 +3832,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
3240 3832
3241 buf = ath10k_wmi_alloc_skb(ar, len); 3833 buf = ath10k_wmi_alloc_skb(ar, len);
3242 if (!buf) 3834 if (!buf)
3243 return -ENOMEM; 3835 return ERR_PTR(-ENOMEM);
3244 3836
3245 cmd = (struct wmi_init_cmd *)buf->data; 3837 cmd = (struct wmi_init_cmd *)buf->data;
3246 3838
@@ -3248,10 +3840,10 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
3248 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); 3840 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
3249 3841
3250 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n"); 3842 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
3251 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); 3843 return buf;
3252} 3844}
3253 3845
3254static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) 3846static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
3255{ 3847{
3256 struct wmi_init_cmd_10x *cmd; 3848 struct wmi_init_cmd_10x *cmd;
3257 struct sk_buff *buf; 3849 struct sk_buff *buf;
@@ -3306,7 +3898,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
3306 3898
3307 buf = ath10k_wmi_alloc_skb(ar, len); 3899 buf = ath10k_wmi_alloc_skb(ar, len);
3308 if (!buf) 3900 if (!buf)
3309 return -ENOMEM; 3901 return ERR_PTR(-ENOMEM);
3310 3902
3311 cmd = (struct wmi_init_cmd_10x *)buf->data; 3903 cmd = (struct wmi_init_cmd_10x *)buf->data;
3312 3904
@@ -3314,15 +3906,15 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
3314 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); 3906 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
3315 3907
3316 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n"); 3908 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
3317 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); 3909 return buf;
3318} 3910}
3319 3911
3320static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar) 3912static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
3321{ 3913{
3322 struct wmi_init_cmd_10_2 *cmd; 3914 struct wmi_init_cmd_10_2 *cmd;
3323 struct sk_buff *buf; 3915 struct sk_buff *buf;
3324 struct wmi_resource_config_10x config = {}; 3916 struct wmi_resource_config_10x config = {};
3325 u32 len, val; 3917 u32 len, val, features;
3326 3918
3327 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); 3919 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
3328 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); 3920 config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
@@ -3356,7 +3948,7 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
3356 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE); 3948 config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
3357 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE); 3949 config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
3358 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES); 3950 config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
3359 config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE); 3951 config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
3360 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM); 3952 config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
3361 3953
3362 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; 3954 val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
@@ -3372,34 +3964,21 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
3372 3964
3373 buf = ath10k_wmi_alloc_skb(ar, len); 3965 buf = ath10k_wmi_alloc_skb(ar, len);
3374 if (!buf) 3966 if (!buf)
3375 return -ENOMEM; 3967 return ERR_PTR(-ENOMEM);
3376 3968
3377 cmd = (struct wmi_init_cmd_10_2 *)buf->data; 3969 cmd = (struct wmi_init_cmd_10_2 *)buf->data;
3378 3970
3971 features = WMI_10_2_RX_BATCH_MODE;
3972 cmd->resource_config.feature_mask = __cpu_to_le32(features);
3973
3379 memcpy(&cmd->resource_config.common, &config, sizeof(config)); 3974 memcpy(&cmd->resource_config.common, &config, sizeof(config));
3380 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); 3975 ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
3381 3976
3382 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n"); 3977 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
3383 return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); 3978 return buf;
3384} 3979}
3385 3980
3386int ath10k_wmi_cmd_init(struct ath10k *ar) 3981int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
3387{
3388 int ret;
3389
3390 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
3391 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
3392 ret = ath10k_wmi_10_2_cmd_init(ar);
3393 else
3394 ret = ath10k_wmi_10x_cmd_init(ar);
3395 } else {
3396 ret = ath10k_wmi_main_cmd_init(ar);
3397 }
3398
3399 return ret;
3400}
3401
3402static int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
3403{ 3982{
3404 if (arg->ie_len && !arg->ie) 3983 if (arg->ie_len && !arg->ie)
3405 return -EINVAL; 3984 return -EINVAL;
@@ -3450,9 +4029,8 @@ ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
3450 return len; 4029 return len;
3451} 4030}
3452 4031
3453static void 4032void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
3454ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn, 4033 const struct wmi_start_scan_arg *arg)
3455 const struct wmi_start_scan_arg *arg)
3456{ 4034{
3457 u32 scan_id; 4035 u32 scan_id;
3458 u32 scan_req_id; 4036 u32 scan_req_id;
@@ -3546,46 +4124,60 @@ ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
3546 } 4124 }
3547} 4125}
3548 4126
3549int ath10k_wmi_start_scan(struct ath10k *ar, 4127static struct sk_buff *
3550 const struct wmi_start_scan_arg *arg) 4128ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
4129 const struct wmi_start_scan_arg *arg)
3551{ 4130{
4131 struct wmi_start_scan_cmd *cmd;
3552 struct sk_buff *skb; 4132 struct sk_buff *skb;
3553 size_t len; 4133 size_t len;
3554 int ret; 4134 int ret;
3555 4135
3556 ret = ath10k_wmi_start_scan_verify(arg); 4136 ret = ath10k_wmi_start_scan_verify(arg);
3557 if (ret) 4137 if (ret)
3558 return ret; 4138 return ERR_PTR(ret);
3559
3560 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
3561 len = sizeof(struct wmi_10x_start_scan_cmd) +
3562 ath10k_wmi_start_scan_tlvs_len(arg);
3563 else
3564 len = sizeof(struct wmi_start_scan_cmd) +
3565 ath10k_wmi_start_scan_tlvs_len(arg);
3566 4139
4140 len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
3567 skb = ath10k_wmi_alloc_skb(ar, len); 4141 skb = ath10k_wmi_alloc_skb(ar, len);
3568 if (!skb) 4142 if (!skb)
3569 return -ENOMEM; 4143 return ERR_PTR(-ENOMEM);
3570
3571 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
3572 struct wmi_10x_start_scan_cmd *cmd;
3573 4144
3574 cmd = (struct wmi_10x_start_scan_cmd *)skb->data; 4145 cmd = (struct wmi_start_scan_cmd *)skb->data;
3575 ath10k_wmi_put_start_scan_common(&cmd->common, arg);
3576 ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
3577 } else {
3578 struct wmi_start_scan_cmd *cmd;
3579 4146
3580 cmd = (struct wmi_start_scan_cmd *)skb->data; 4147 ath10k_wmi_put_start_scan_common(&cmd->common, arg);
3581 cmd->burst_duration_ms = __cpu_to_le32(0); 4148 ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
3582 4149
3583 ath10k_wmi_put_start_scan_common(&cmd->common, arg); 4150 cmd->burst_duration_ms = __cpu_to_le32(0);
3584 ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
3585 }
3586 4151
3587 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n"); 4152 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
3588 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid); 4153 return skb;
4154}
4155
4156static struct sk_buff *
4157ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
4158 const struct wmi_start_scan_arg *arg)
4159{
4160 struct wmi_10x_start_scan_cmd *cmd;
4161 struct sk_buff *skb;
4162 size_t len;
4163 int ret;
4164
4165 ret = ath10k_wmi_start_scan_verify(arg);
4166 if (ret)
4167 return ERR_PTR(ret);
4168
4169 len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
4170 skb = ath10k_wmi_alloc_skb(ar, len);
4171 if (!skb)
4172 return ERR_PTR(-ENOMEM);
4173
4174 cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
4175
4176 ath10k_wmi_put_start_scan_common(&cmd->common, arg);
4177 ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
4178
4179 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
4180 return skb;
3589} 4181}
3590 4182
3591void ath10k_wmi_start_scan_init(struct ath10k *ar, 4183void ath10k_wmi_start_scan_init(struct ath10k *ar,
@@ -3614,7 +4206,9 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
3614 arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF"; 4206 arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
3615} 4207}
3616 4208
3617int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) 4209static struct sk_buff *
4210ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
4211 const struct wmi_stop_scan_arg *arg)
3618{ 4212{
3619 struct wmi_stop_scan_cmd *cmd; 4213 struct wmi_stop_scan_cmd *cmd;
3620 struct sk_buff *skb; 4214 struct sk_buff *skb;
@@ -3622,13 +4216,13 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
3622 u32 req_id; 4216 u32 req_id;
3623 4217
3624 if (arg->req_id > 0xFFF) 4218 if (arg->req_id > 0xFFF)
3625 return -EINVAL; 4219 return ERR_PTR(-EINVAL);
3626 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF) 4220 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
3627 return -EINVAL; 4221 return ERR_PTR(-EINVAL);
3628 4222
3629 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4223 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3630 if (!skb) 4224 if (!skb)
3631 return -ENOMEM; 4225 return ERR_PTR(-ENOMEM);
3632 4226
3633 scan_id = arg->u.scan_id; 4227 scan_id = arg->u.scan_id;
3634 scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX; 4228 scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
@@ -3645,20 +4239,21 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
3645 ath10k_dbg(ar, ATH10K_DBG_WMI, 4239 ath10k_dbg(ar, ATH10K_DBG_WMI,
3646 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n", 4240 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
3647 arg->req_id, arg->req_type, arg->u.scan_id); 4241 arg->req_id, arg->req_type, arg->u.scan_id);
3648 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid); 4242 return skb;
3649} 4243}
3650 4244
3651int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, 4245static struct sk_buff *
3652 enum wmi_vdev_type type, 4246ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
3653 enum wmi_vdev_subtype subtype, 4247 enum wmi_vdev_type type,
3654 const u8 macaddr[ETH_ALEN]) 4248 enum wmi_vdev_subtype subtype,
4249 const u8 macaddr[ETH_ALEN])
3655{ 4250{
3656 struct wmi_vdev_create_cmd *cmd; 4251 struct wmi_vdev_create_cmd *cmd;
3657 struct sk_buff *skb; 4252 struct sk_buff *skb;
3658 4253
3659 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4254 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3660 if (!skb) 4255 if (!skb)
3661 return -ENOMEM; 4256 return ERR_PTR(-ENOMEM);
3662 4257
3663 cmd = (struct wmi_vdev_create_cmd *)skb->data; 4258 cmd = (struct wmi_vdev_create_cmd *)skb->data;
3664 cmd->vdev_id = __cpu_to_le32(vdev_id); 4259 cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -3669,58 +4264,52 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
3669 ath10k_dbg(ar, ATH10K_DBG_WMI, 4264 ath10k_dbg(ar, ATH10K_DBG_WMI,
3670 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n", 4265 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
3671 vdev_id, type, subtype, macaddr); 4266 vdev_id, type, subtype, macaddr);
3672 4267 return skb;
3673 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
3674} 4268}
3675 4269
3676int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) 4270static struct sk_buff *
4271ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
3677{ 4272{
3678 struct wmi_vdev_delete_cmd *cmd; 4273 struct wmi_vdev_delete_cmd *cmd;
3679 struct sk_buff *skb; 4274 struct sk_buff *skb;
3680 4275
3681 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4276 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3682 if (!skb) 4277 if (!skb)
3683 return -ENOMEM; 4278 return ERR_PTR(-ENOMEM);
3684 4279
3685 cmd = (struct wmi_vdev_delete_cmd *)skb->data; 4280 cmd = (struct wmi_vdev_delete_cmd *)skb->data;
3686 cmd->vdev_id = __cpu_to_le32(vdev_id); 4281 cmd->vdev_id = __cpu_to_le32(vdev_id);
3687 4282
3688 ath10k_dbg(ar, ATH10K_DBG_WMI, 4283 ath10k_dbg(ar, ATH10K_DBG_WMI,
3689 "WMI vdev delete id %d\n", vdev_id); 4284 "WMI vdev delete id %d\n", vdev_id);
3690 4285 return skb;
3691 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
3692} 4286}
3693 4287
3694static int 4288static struct sk_buff *
3695ath10k_wmi_vdev_start_restart(struct ath10k *ar, 4289ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
3696 const struct wmi_vdev_start_request_arg *arg, 4290 const struct wmi_vdev_start_request_arg *arg,
3697 u32 cmd_id) 4291 bool restart)
3698{ 4292{
3699 struct wmi_vdev_start_request_cmd *cmd; 4293 struct wmi_vdev_start_request_cmd *cmd;
3700 struct sk_buff *skb; 4294 struct sk_buff *skb;
3701 const char *cmdname; 4295 const char *cmdname;
3702 u32 flags = 0; 4296 u32 flags = 0;
3703 4297
3704 if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
3705 cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
3706 return -EINVAL;
3707 if (WARN_ON(arg->ssid && arg->ssid_len == 0)) 4298 if (WARN_ON(arg->ssid && arg->ssid_len == 0))
3708 return -EINVAL; 4299 return ERR_PTR(-EINVAL);
3709 if (WARN_ON(arg->hidden_ssid && !arg->ssid)) 4300 if (WARN_ON(arg->hidden_ssid && !arg->ssid))
3710 return -EINVAL; 4301 return ERR_PTR(-EINVAL);
3711 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) 4302 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
3712 return -EINVAL; 4303 return ERR_PTR(-EINVAL);
3713 4304
3714 if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid) 4305 if (restart)
3715 cmdname = "start";
3716 else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
3717 cmdname = "restart"; 4306 cmdname = "restart";
3718 else 4307 else
3719 return -EINVAL; /* should not happen, we already check cmd_id */ 4308 cmdname = "start";
3720 4309
3721 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4310 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3722 if (!skb) 4311 if (!skb)
3723 return -ENOMEM; 4312 return ERR_PTR(-ENOMEM);
3724 4313
3725 if (arg->hidden_ssid) 4314 if (arg->hidden_ssid)
3726 flags |= WMI_VDEV_START_HIDDEN_SSID; 4315 flags |= WMI_VDEV_START_HIDDEN_SSID;
@@ -3749,50 +4338,36 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar,
3749 flags, arg->channel.freq, arg->channel.mode, 4338 flags, arg->channel.freq, arg->channel.mode,
3750 cmd->chan.flags, arg->channel.max_power); 4339 cmd->chan.flags, arg->channel.max_power);
3751 4340
3752 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 4341 return skb;
3753}
3754
3755int ath10k_wmi_vdev_start(struct ath10k *ar,
3756 const struct wmi_vdev_start_request_arg *arg)
3757{
3758 u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
3759
3760 return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
3761}
3762
3763int ath10k_wmi_vdev_restart(struct ath10k *ar,
3764 const struct wmi_vdev_start_request_arg *arg)
3765{
3766 u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
3767
3768 return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
3769} 4342}
3770 4343
3771int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) 4344static struct sk_buff *
4345ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
3772{ 4346{
3773 struct wmi_vdev_stop_cmd *cmd; 4347 struct wmi_vdev_stop_cmd *cmd;
3774 struct sk_buff *skb; 4348 struct sk_buff *skb;
3775 4349
3776 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4350 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3777 if (!skb) 4351 if (!skb)
3778 return -ENOMEM; 4352 return ERR_PTR(-ENOMEM);
3779 4353
3780 cmd = (struct wmi_vdev_stop_cmd *)skb->data; 4354 cmd = (struct wmi_vdev_stop_cmd *)skb->data;
3781 cmd->vdev_id = __cpu_to_le32(vdev_id); 4355 cmd->vdev_id = __cpu_to_le32(vdev_id);
3782 4356
3783 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); 4357 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
3784 4358 return skb;
3785 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
3786} 4359}
3787 4360
3788int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) 4361static struct sk_buff *
4362ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
4363 const u8 *bssid)
3789{ 4364{
3790 struct wmi_vdev_up_cmd *cmd; 4365 struct wmi_vdev_up_cmd *cmd;
3791 struct sk_buff *skb; 4366 struct sk_buff *skb;
3792 4367
3793 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4368 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3794 if (!skb) 4369 if (!skb)
3795 return -ENOMEM; 4370 return ERR_PTR(-ENOMEM);
3796 4371
3797 cmd = (struct wmi_vdev_up_cmd *)skb->data; 4372 cmd = (struct wmi_vdev_up_cmd *)skb->data;
3798 cmd->vdev_id = __cpu_to_le32(vdev_id); 4373 cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -3802,30 +4377,30 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
3802 ath10k_dbg(ar, ATH10K_DBG_WMI, 4377 ath10k_dbg(ar, ATH10K_DBG_WMI,
3803 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n", 4378 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
3804 vdev_id, aid, bssid); 4379 vdev_id, aid, bssid);
3805 4380 return skb;
3806 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
3807} 4381}
3808 4382
3809int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id) 4383static struct sk_buff *
4384ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
3810{ 4385{
3811 struct wmi_vdev_down_cmd *cmd; 4386 struct wmi_vdev_down_cmd *cmd;
3812 struct sk_buff *skb; 4387 struct sk_buff *skb;
3813 4388
3814 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4389 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3815 if (!skb) 4390 if (!skb)
3816 return -ENOMEM; 4391 return ERR_PTR(-ENOMEM);
3817 4392
3818 cmd = (struct wmi_vdev_down_cmd *)skb->data; 4393 cmd = (struct wmi_vdev_down_cmd *)skb->data;
3819 cmd->vdev_id = __cpu_to_le32(vdev_id); 4394 cmd->vdev_id = __cpu_to_le32(vdev_id);
3820 4395
3821 ath10k_dbg(ar, ATH10K_DBG_WMI, 4396 ath10k_dbg(ar, ATH10K_DBG_WMI,
3822 "wmi mgmt vdev down id 0x%x\n", vdev_id); 4397 "wmi mgmt vdev down id 0x%x\n", vdev_id);
3823 4398 return skb;
3824 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
3825} 4399}
3826 4400
3827int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, 4401static struct sk_buff *
3828 u32 param_id, u32 param_value) 4402ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
4403 u32 param_id, u32 param_value)
3829{ 4404{
3830 struct wmi_vdev_set_param_cmd *cmd; 4405 struct wmi_vdev_set_param_cmd *cmd;
3831 struct sk_buff *skb; 4406 struct sk_buff *skb;
@@ -3834,12 +4409,12 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
3834 ath10k_dbg(ar, ATH10K_DBG_WMI, 4409 ath10k_dbg(ar, ATH10K_DBG_WMI,
3835 "vdev param %d not supported by firmware\n", 4410 "vdev param %d not supported by firmware\n",
3836 param_id); 4411 param_id);
3837 return -EOPNOTSUPP; 4412 return ERR_PTR(-EOPNOTSUPP);
3838 } 4413 }
3839 4414
3840 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4415 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3841 if (!skb) 4416 if (!skb)
3842 return -ENOMEM; 4417 return ERR_PTR(-ENOMEM);
3843 4418
3844 cmd = (struct wmi_vdev_set_param_cmd *)skb->data; 4419 cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
3845 cmd->vdev_id = __cpu_to_le32(vdev_id); 4420 cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -3849,24 +4424,24 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
3849 ath10k_dbg(ar, ATH10K_DBG_WMI, 4424 ath10k_dbg(ar, ATH10K_DBG_WMI,
3850 "wmi vdev id 0x%x set param %d value %d\n", 4425 "wmi vdev id 0x%x set param %d value %d\n",
3851 vdev_id, param_id, param_value); 4426 vdev_id, param_id, param_value);
3852 4427 return skb;
3853 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
3854} 4428}
3855 4429
3856int ath10k_wmi_vdev_install_key(struct ath10k *ar, 4430static struct sk_buff *
3857 const struct wmi_vdev_install_key_arg *arg) 4431ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
4432 const struct wmi_vdev_install_key_arg *arg)
3858{ 4433{
3859 struct wmi_vdev_install_key_cmd *cmd; 4434 struct wmi_vdev_install_key_cmd *cmd;
3860 struct sk_buff *skb; 4435 struct sk_buff *skb;
3861 4436
3862 if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL) 4437 if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
3863 return -EINVAL; 4438 return ERR_PTR(-EINVAL);
3864 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL) 4439 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
3865 return -EINVAL; 4440 return ERR_PTR(-EINVAL);
3866 4441
3867 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len); 4442 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
3868 if (!skb) 4443 if (!skb)
3869 return -ENOMEM; 4444 return ERR_PTR(-ENOMEM);
3870 4445
3871 cmd = (struct wmi_vdev_install_key_cmd *)skb->data; 4446 cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
3872 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 4447 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
@@ -3885,20 +4460,19 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
3885 ath10k_dbg(ar, ATH10K_DBG_WMI, 4460 ath10k_dbg(ar, ATH10K_DBG_WMI,
3886 "wmi vdev install key idx %d cipher %d len %d\n", 4461 "wmi vdev install key idx %d cipher %d len %d\n",
3887 arg->key_idx, arg->key_cipher, arg->key_len); 4462 arg->key_idx, arg->key_cipher, arg->key_len);
3888 return ath10k_wmi_cmd_send(ar, skb, 4463 return skb;
3889 ar->wmi.cmd->vdev_install_key_cmdid);
3890} 4464}
3891 4465
3892int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar, 4466static struct sk_buff *
3893 const struct wmi_vdev_spectral_conf_arg *arg) 4467ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
4468 const struct wmi_vdev_spectral_conf_arg *arg)
3894{ 4469{
3895 struct wmi_vdev_spectral_conf_cmd *cmd; 4470 struct wmi_vdev_spectral_conf_cmd *cmd;
3896 struct sk_buff *skb; 4471 struct sk_buff *skb;
3897 u32 cmdid;
3898 4472
3899 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4473 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3900 if (!skb) 4474 if (!skb)
3901 return -ENOMEM; 4475 return ERR_PTR(-ENOMEM);
3902 4476
3903 cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data; 4477 cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
3904 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 4478 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
@@ -3921,39 +4495,38 @@ int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
3921 cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj); 4495 cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
3922 cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask); 4496 cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
3923 4497
3924 cmdid = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid; 4498 return skb;
3925 return ath10k_wmi_cmd_send(ar, skb, cmdid);
3926} 4499}
3927 4500
3928int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, 4501static struct sk_buff *
3929 u32 enable) 4502ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
4503 u32 trigger, u32 enable)
3930{ 4504{
3931 struct wmi_vdev_spectral_enable_cmd *cmd; 4505 struct wmi_vdev_spectral_enable_cmd *cmd;
3932 struct sk_buff *skb; 4506 struct sk_buff *skb;
3933 u32 cmdid;
3934 4507
3935 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4508 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3936 if (!skb) 4509 if (!skb)
3937 return -ENOMEM; 4510 return ERR_PTR(-ENOMEM);
3938 4511
3939 cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data; 4512 cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
3940 cmd->vdev_id = __cpu_to_le32(vdev_id); 4513 cmd->vdev_id = __cpu_to_le32(vdev_id);
3941 cmd->trigger_cmd = __cpu_to_le32(trigger); 4514 cmd->trigger_cmd = __cpu_to_le32(trigger);
3942 cmd->enable_cmd = __cpu_to_le32(enable); 4515 cmd->enable_cmd = __cpu_to_le32(enable);
3943 4516
3944 cmdid = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid; 4517 return skb;
3945 return ath10k_wmi_cmd_send(ar, skb, cmdid);
3946} 4518}
3947 4519
3948int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, 4520static struct sk_buff *
3949 const u8 peer_addr[ETH_ALEN]) 4521ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
4522 const u8 peer_addr[ETH_ALEN])
3950{ 4523{
3951 struct wmi_peer_create_cmd *cmd; 4524 struct wmi_peer_create_cmd *cmd;
3952 struct sk_buff *skb; 4525 struct sk_buff *skb;
3953 4526
3954 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4527 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3955 if (!skb) 4528 if (!skb)
3956 return -ENOMEM; 4529 return ERR_PTR(-ENOMEM);
3957 4530
3958 cmd = (struct wmi_peer_create_cmd *)skb->data; 4531 cmd = (struct wmi_peer_create_cmd *)skb->data;
3959 cmd->vdev_id = __cpu_to_le32(vdev_id); 4532 cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -3962,18 +4535,19 @@ int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
3962 ath10k_dbg(ar, ATH10K_DBG_WMI, 4535 ath10k_dbg(ar, ATH10K_DBG_WMI,
3963 "wmi peer create vdev_id %d peer_addr %pM\n", 4536 "wmi peer create vdev_id %d peer_addr %pM\n",
3964 vdev_id, peer_addr); 4537 vdev_id, peer_addr);
3965 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid); 4538 return skb;
3966} 4539}
3967 4540
3968int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, 4541static struct sk_buff *
3969 const u8 peer_addr[ETH_ALEN]) 4542ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
4543 const u8 peer_addr[ETH_ALEN])
3970{ 4544{
3971 struct wmi_peer_delete_cmd *cmd; 4545 struct wmi_peer_delete_cmd *cmd;
3972 struct sk_buff *skb; 4546 struct sk_buff *skb;
3973 4547
3974 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4548 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3975 if (!skb) 4549 if (!skb)
3976 return -ENOMEM; 4550 return ERR_PTR(-ENOMEM);
3977 4551
3978 cmd = (struct wmi_peer_delete_cmd *)skb->data; 4552 cmd = (struct wmi_peer_delete_cmd *)skb->data;
3979 cmd->vdev_id = __cpu_to_le32(vdev_id); 4553 cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -3982,18 +4556,19 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
3982 ath10k_dbg(ar, ATH10K_DBG_WMI, 4556 ath10k_dbg(ar, ATH10K_DBG_WMI,
3983 "wmi peer delete vdev_id %d peer_addr %pM\n", 4557 "wmi peer delete vdev_id %d peer_addr %pM\n",
3984 vdev_id, peer_addr); 4558 vdev_id, peer_addr);
3985 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid); 4559 return skb;
3986} 4560}
3987 4561
3988int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, 4562static struct sk_buff *
3989 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap) 4563ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
4564 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
3990{ 4565{
3991 struct wmi_peer_flush_tids_cmd *cmd; 4566 struct wmi_peer_flush_tids_cmd *cmd;
3992 struct sk_buff *skb; 4567 struct sk_buff *skb;
3993 4568
3994 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4569 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
3995 if (!skb) 4570 if (!skb)
3996 return -ENOMEM; 4571 return ERR_PTR(-ENOMEM);
3997 4572
3998 cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; 4573 cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
3999 cmd->vdev_id = __cpu_to_le32(vdev_id); 4574 cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -4003,19 +4578,21 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
4003 ath10k_dbg(ar, ATH10K_DBG_WMI, 4578 ath10k_dbg(ar, ATH10K_DBG_WMI,
4004 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n", 4579 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
4005 vdev_id, peer_addr, tid_bitmap); 4580 vdev_id, peer_addr, tid_bitmap);
4006 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid); 4581 return skb;
4007} 4582}
4008 4583
4009int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, 4584static struct sk_buff *
4010 const u8 *peer_addr, enum wmi_peer_param param_id, 4585ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
4011 u32 param_value) 4586 const u8 *peer_addr,
4587 enum wmi_peer_param param_id,
4588 u32 param_value)
4012{ 4589{
4013 struct wmi_peer_set_param_cmd *cmd; 4590 struct wmi_peer_set_param_cmd *cmd;
4014 struct sk_buff *skb; 4591 struct sk_buff *skb;
4015 4592
4016 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4593 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4017 if (!skb) 4594 if (!skb)
4018 return -ENOMEM; 4595 return ERR_PTR(-ENOMEM);
4019 4596
4020 cmd = (struct wmi_peer_set_param_cmd *)skb->data; 4597 cmd = (struct wmi_peer_set_param_cmd *)skb->data;
4021 cmd->vdev_id = __cpu_to_le32(vdev_id); 4598 cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -4026,19 +4603,19 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
4026 ath10k_dbg(ar, ATH10K_DBG_WMI, 4603 ath10k_dbg(ar, ATH10K_DBG_WMI,
4027 "wmi vdev %d peer 0x%pM set param %d value %d\n", 4604 "wmi vdev %d peer 0x%pM set param %d value %d\n",
4028 vdev_id, peer_addr, param_id, param_value); 4605 vdev_id, peer_addr, param_id, param_value);
4029 4606 return skb;
4030 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
4031} 4607}
4032 4608
4033int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, 4609static struct sk_buff *
4034 enum wmi_sta_ps_mode psmode) 4610ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
4611 enum wmi_sta_ps_mode psmode)
4035{ 4612{
4036 struct wmi_sta_powersave_mode_cmd *cmd; 4613 struct wmi_sta_powersave_mode_cmd *cmd;
4037 struct sk_buff *skb; 4614 struct sk_buff *skb;
4038 4615
4039 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4616 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4040 if (!skb) 4617 if (!skb)
4041 return -ENOMEM; 4618 return ERR_PTR(-ENOMEM);
4042 4619
4043 cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data; 4620 cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
4044 cmd->vdev_id = __cpu_to_le32(vdev_id); 4621 cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -4047,21 +4624,20 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
4047 ath10k_dbg(ar, ATH10K_DBG_WMI, 4624 ath10k_dbg(ar, ATH10K_DBG_WMI,
4048 "wmi set powersave id 0x%x mode %d\n", 4625 "wmi set powersave id 0x%x mode %d\n",
4049 vdev_id, psmode); 4626 vdev_id, psmode);
4050 4627 return skb;
4051 return ath10k_wmi_cmd_send(ar, skb,
4052 ar->wmi.cmd->sta_powersave_mode_cmdid);
4053} 4628}
4054 4629
4055int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, 4630static struct sk_buff *
4056 enum wmi_sta_powersave_param param_id, 4631ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
4057 u32 value) 4632 enum wmi_sta_powersave_param param_id,
4633 u32 value)
4058{ 4634{
4059 struct wmi_sta_powersave_param_cmd *cmd; 4635 struct wmi_sta_powersave_param_cmd *cmd;
4060 struct sk_buff *skb; 4636 struct sk_buff *skb;
4061 4637
4062 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4638 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4063 if (!skb) 4639 if (!skb)
4064 return -ENOMEM; 4640 return ERR_PTR(-ENOMEM);
4065 4641
4066 cmd = (struct wmi_sta_powersave_param_cmd *)skb->data; 4642 cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
4067 cmd->vdev_id = __cpu_to_le32(vdev_id); 4643 cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -4071,22 +4647,22 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
4071 ath10k_dbg(ar, ATH10K_DBG_WMI, 4647 ath10k_dbg(ar, ATH10K_DBG_WMI,
4072 "wmi sta ps param vdev_id 0x%x param %d value %d\n", 4648 "wmi sta ps param vdev_id 0x%x param %d value %d\n",
4073 vdev_id, param_id, value); 4649 vdev_id, param_id, value);
4074 return ath10k_wmi_cmd_send(ar, skb, 4650 return skb;
4075 ar->wmi.cmd->sta_powersave_param_cmdid);
4076} 4651}
4077 4652
4078int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, 4653static struct sk_buff *
4079 enum wmi_ap_ps_peer_param param_id, u32 value) 4654ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
4655 enum wmi_ap_ps_peer_param param_id, u32 value)
4080{ 4656{
4081 struct wmi_ap_ps_peer_cmd *cmd; 4657 struct wmi_ap_ps_peer_cmd *cmd;
4082 struct sk_buff *skb; 4658 struct sk_buff *skb;
4083 4659
4084 if (!mac) 4660 if (!mac)
4085 return -EINVAL; 4661 return ERR_PTR(-EINVAL);
4086 4662
4087 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4663 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4088 if (!skb) 4664 if (!skb)
4089 return -ENOMEM; 4665 return ERR_PTR(-ENOMEM);
4090 4666
4091 cmd = (struct wmi_ap_ps_peer_cmd *)skb->data; 4667 cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
4092 cmd->vdev_id = __cpu_to_le32(vdev_id); 4668 cmd->vdev_id = __cpu_to_le32(vdev_id);
@@ -4097,13 +4673,12 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
4097 ath10k_dbg(ar, ATH10K_DBG_WMI, 4673 ath10k_dbg(ar, ATH10K_DBG_WMI,
4098 "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n", 4674 "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
4099 vdev_id, param_id, value, mac); 4675 vdev_id, param_id, value, mac);
4100 4676 return skb;
4101 return ath10k_wmi_cmd_send(ar, skb,
4102 ar->wmi.cmd->ap_ps_peer_param_cmdid);
4103} 4677}
4104 4678
4105int ath10k_wmi_scan_chan_list(struct ath10k *ar, 4679static struct sk_buff *
4106 const struct wmi_scan_chan_list_arg *arg) 4680ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
4681 const struct wmi_scan_chan_list_arg *arg)
4107{ 4682{
4108 struct wmi_scan_chan_list_cmd *cmd; 4683 struct wmi_scan_chan_list_cmd *cmd;
4109 struct sk_buff *skb; 4684 struct sk_buff *skb;
@@ -4116,7 +4691,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
4116 4691
4117 skb = ath10k_wmi_alloc_skb(ar, len); 4692 skb = ath10k_wmi_alloc_skb(ar, len);
4118 if (!skb) 4693 if (!skb)
4119 return -EINVAL; 4694 return ERR_PTR(-EINVAL);
4120 4695
4121 cmd = (struct wmi_scan_chan_list_cmd *)skb->data; 4696 cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
4122 cmd->num_scan_chans = __cpu_to_le32(arg->n_channels); 4697 cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
@@ -4128,7 +4703,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
4128 ath10k_wmi_put_wmi_channel(ci, ch); 4703 ath10k_wmi_put_wmi_channel(ci, ch);
4129 } 4704 }
4130 4705
4131 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid); 4706 return skb;
4132} 4707}
4133 4708
4134static void 4709static void
@@ -4209,12 +4784,9 @@ ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
4209 cmd->info0 = __cpu_to_le32(info0); 4784 cmd->info0 = __cpu_to_le32(info0);
4210} 4785}
4211 4786
4212int ath10k_wmi_peer_assoc(struct ath10k *ar, 4787static int
4213 const struct wmi_peer_assoc_complete_arg *arg) 4788ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
4214{ 4789{
4215 struct sk_buff *skb;
4216 int len;
4217
4218 if (arg->peer_mpdu_density > 16) 4790 if (arg->peer_mpdu_density > 16)
4219 return -EINVAL; 4791 return -EINVAL;
4220 if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES) 4792 if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
@@ -4222,79 +4794,135 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
4222 if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES) 4794 if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
4223 return -EINVAL; 4795 return -EINVAL;
4224 4796
4225 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { 4797 return 0;
4226 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) 4798}
4227 len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd); 4799
4228 else 4800static struct sk_buff *
4229 len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd); 4801ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
4230 } else { 4802 const struct wmi_peer_assoc_complete_arg *arg)
4231 len = sizeof(struct wmi_main_peer_assoc_complete_cmd); 4803{
4232 } 4804 size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
4805 struct sk_buff *skb;
4806 int ret;
4807
4808 ret = ath10k_wmi_peer_assoc_check_arg(arg);
4809 if (ret)
4810 return ERR_PTR(ret);
4233 4811
4234 skb = ath10k_wmi_alloc_skb(ar, len); 4812 skb = ath10k_wmi_alloc_skb(ar, len);
4235 if (!skb) 4813 if (!skb)
4236 return -ENOMEM; 4814 return ERR_PTR(-ENOMEM);
4237 4815
4238 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { 4816 ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
4239 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) 4817
4240 ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg); 4818 ath10k_dbg(ar, ATH10K_DBG_WMI,
4241 else 4819 "wmi peer assoc vdev %d addr %pM (%s)\n",
4242 ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg); 4820 arg->vdev_id, arg->addr,
4243 } else { 4821 arg->peer_reassoc ? "reassociate" : "new");
4244 ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg); 4822 return skb;
4245 } 4823}
4824
4825static struct sk_buff *
4826ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
4827 const struct wmi_peer_assoc_complete_arg *arg)
4828{
4829 size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
4830 struct sk_buff *skb;
4831 int ret;
4832
4833 ret = ath10k_wmi_peer_assoc_check_arg(arg);
4834 if (ret)
4835 return ERR_PTR(ret);
4836
4837 skb = ath10k_wmi_alloc_skb(ar, len);
4838 if (!skb)
4839 return ERR_PTR(-ENOMEM);
4840
4841 ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
4842
4843 ath10k_dbg(ar, ATH10K_DBG_WMI,
4844 "wmi peer assoc vdev %d addr %pM (%s)\n",
4845 arg->vdev_id, arg->addr,
4846 arg->peer_reassoc ? "reassociate" : "new");
4847 return skb;
4848}
4849
4850static struct sk_buff *
4851ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
4852 const struct wmi_peer_assoc_complete_arg *arg)
4853{
4854 size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
4855 struct sk_buff *skb;
4856 int ret;
4857
4858 ret = ath10k_wmi_peer_assoc_check_arg(arg);
4859 if (ret)
4860 return ERR_PTR(ret);
4861
4862 skb = ath10k_wmi_alloc_skb(ar, len);
4863 if (!skb)
4864 return ERR_PTR(-ENOMEM);
4865
4866 ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
4246 4867
4247 ath10k_dbg(ar, ATH10K_DBG_WMI, 4868 ath10k_dbg(ar, ATH10K_DBG_WMI,
4248 "wmi peer assoc vdev %d addr %pM (%s)\n", 4869 "wmi peer assoc vdev %d addr %pM (%s)\n",
4249 arg->vdev_id, arg->addr, 4870 arg->vdev_id, arg->addr,
4250 arg->peer_reassoc ? "reassociate" : "new"); 4871 arg->peer_reassoc ? "reassociate" : "new");
4251 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid); 4872 return skb;
4873}
4874
4875static struct sk_buff *
4876ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
4877{
4878 struct sk_buff *skb;
4879
4880 skb = ath10k_wmi_alloc_skb(ar, 0);
4881 if (!skb)
4882 return ERR_PTR(-ENOMEM);
4883
4884 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
4885 return skb;
4252} 4886}
4253 4887
4254/* This function assumes the beacon is already DMA mapped */ 4888/* This function assumes the beacon is already DMA mapped */
4255int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif) 4889static struct sk_buff *
4890ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
4891 size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
4892 bool deliver_cab)
4256{ 4893{
4257 struct wmi_bcn_tx_ref_cmd *cmd; 4894 struct wmi_bcn_tx_ref_cmd *cmd;
4258 struct sk_buff *skb; 4895 struct sk_buff *skb;
4259 struct sk_buff *beacon = arvif->beacon;
4260 struct ath10k *ar = arvif->ar;
4261 struct ieee80211_hdr *hdr; 4896 struct ieee80211_hdr *hdr;
4262 int ret;
4263 u16 fc; 4897 u16 fc;
4264 4898
4265 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4899 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4266 if (!skb) 4900 if (!skb)
4267 return -ENOMEM; 4901 return ERR_PTR(-ENOMEM);
4268 4902
4269 hdr = (struct ieee80211_hdr *)beacon->data; 4903 hdr = (struct ieee80211_hdr *)bcn;
4270 fc = le16_to_cpu(hdr->frame_control); 4904 fc = le16_to_cpu(hdr->frame_control);
4271 4905
4272 cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data; 4906 cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
4273 cmd->vdev_id = __cpu_to_le32(arvif->vdev_id); 4907 cmd->vdev_id = __cpu_to_le32(vdev_id);
4274 cmd->data_len = __cpu_to_le32(beacon->len); 4908 cmd->data_len = __cpu_to_le32(bcn_len);
4275 cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr); 4909 cmd->data_ptr = __cpu_to_le32(bcn_paddr);
4276 cmd->msdu_id = 0; 4910 cmd->msdu_id = 0;
4277 cmd->frame_control = __cpu_to_le32(fc); 4911 cmd->frame_control = __cpu_to_le32(fc);
4278 cmd->flags = 0; 4912 cmd->flags = 0;
4279 cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA); 4913 cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
4280 4914
4281 if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero) 4915 if (dtim_zero)
4282 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); 4916 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
4283 4917
4284 if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab) 4918 if (deliver_cab)
4285 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB); 4919 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
4286 4920
4287 ret = ath10k_wmi_cmd_send_nowait(ar, skb, 4921 return skb;
4288 ar->wmi.cmd->pdev_send_bcn_cmdid);
4289
4290 if (ret)
4291 dev_kfree_skb(skb);
4292
4293 return ret;
4294} 4922}
4295 4923
4296static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, 4924void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
4297 const struct wmi_wmm_params_arg *arg) 4925 const struct wmi_wmm_params_arg *arg)
4298{ 4926{
4299 params->cwmin = __cpu_to_le32(arg->cwmin); 4927 params->cwmin = __cpu_to_le32(arg->cwmin);
4300 params->cwmax = __cpu_to_le32(arg->cwmax); 4928 params->cwmax = __cpu_to_le32(arg->cwmax);
@@ -4304,52 +4932,54 @@ static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
4304 params->no_ack = __cpu_to_le32(arg->no_ack); 4932 params->no_ack = __cpu_to_le32(arg->no_ack);
4305} 4933}
4306 4934
4307int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, 4935static struct sk_buff *
4308 const struct wmi_pdev_set_wmm_params_arg *arg) 4936ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
4937 const struct wmi_wmm_params_all_arg *arg)
4309{ 4938{
4310 struct wmi_pdev_set_wmm_params *cmd; 4939 struct wmi_pdev_set_wmm_params *cmd;
4311 struct sk_buff *skb; 4940 struct sk_buff *skb;
4312 4941
4313 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4942 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4314 if (!skb) 4943 if (!skb)
4315 return -ENOMEM; 4944 return ERR_PTR(-ENOMEM);
4316 4945
4317 cmd = (struct wmi_pdev_set_wmm_params *)skb->data; 4946 cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
4318 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be); 4947 ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
4319 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk); 4948 ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
4320 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi); 4949 ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
4321 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); 4950 ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
4322 4951
4323 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); 4952 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
4324 return ath10k_wmi_cmd_send(ar, skb, 4953 return skb;
4325 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
4326} 4954}
4327 4955
4328int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) 4956static struct sk_buff *
4957ath10k_wmi_op_gen_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
4329{ 4958{
4330 struct wmi_request_stats_cmd *cmd; 4959 struct wmi_request_stats_cmd *cmd;
4331 struct sk_buff *skb; 4960 struct sk_buff *skb;
4332 4961
4333 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4962 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4334 if (!skb) 4963 if (!skb)
4335 return -ENOMEM; 4964 return ERR_PTR(-ENOMEM);
4336 4965
4337 cmd = (struct wmi_request_stats_cmd *)skb->data; 4966 cmd = (struct wmi_request_stats_cmd *)skb->data;
4338 cmd->stats_id = __cpu_to_le32(stats_id); 4967 cmd->stats_id = __cpu_to_le32(stats_id);
4339 4968
4340 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); 4969 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
4341 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid); 4970 return skb;
4342} 4971}
4343 4972
4344int ath10k_wmi_force_fw_hang(struct ath10k *ar, 4973static struct sk_buff *
4345 enum wmi_force_fw_hang_type type, u32 delay_ms) 4974ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
4975 enum wmi_force_fw_hang_type type, u32 delay_ms)
4346{ 4976{
4347 struct wmi_force_fw_hang_cmd *cmd; 4977 struct wmi_force_fw_hang_cmd *cmd;
4348 struct sk_buff *skb; 4978 struct sk_buff *skb;
4349 4979
4350 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 4980 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4351 if (!skb) 4981 if (!skb)
4352 return -ENOMEM; 4982 return ERR_PTR(-ENOMEM);
4353 4983
4354 cmd = (struct wmi_force_fw_hang_cmd *)skb->data; 4984 cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
4355 cmd->type = __cpu_to_le32(type); 4985 cmd->type = __cpu_to_le32(type);
@@ -4357,10 +4987,12 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
4357 4987
4358 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n", 4988 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
4359 type, delay_ms); 4989 type, delay_ms);
4360 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid); 4990 return skb;
4361} 4991}
4362 4992
4363int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable) 4993static struct sk_buff *
4994ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
4995 u32 log_level)
4364{ 4996{
4365 struct wmi_dbglog_cfg_cmd *cmd; 4997 struct wmi_dbglog_cfg_cmd *cmd;
4366 struct sk_buff *skb; 4998 struct sk_buff *skb;
@@ -4368,12 +5000,12 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
4368 5000
4369 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 5001 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4370 if (!skb) 5002 if (!skb)
4371 return -ENOMEM; 5003 return ERR_PTR(-ENOMEM);
4372 5004
4373 cmd = (struct wmi_dbglog_cfg_cmd *)skb->data; 5005 cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
4374 5006
4375 if (module_enable) { 5007 if (module_enable) {
4376 cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE, 5008 cfg = SM(log_level,
4377 ATH10K_DBGLOG_CFG_LOG_LVL); 5009 ATH10K_DBGLOG_CFG_LOG_LVL);
4378 } else { 5010 } else {
4379 /* set back defaults, all modules with WARN level */ 5011 /* set back defaults, all modules with WARN level */
@@ -4393,57 +5025,449 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
4393 __le32_to_cpu(cmd->module_valid), 5025 __le32_to_cpu(cmd->module_valid),
4394 __le32_to_cpu(cmd->config_enable), 5026 __le32_to_cpu(cmd->config_enable),
4395 __le32_to_cpu(cmd->config_valid)); 5027 __le32_to_cpu(cmd->config_valid));
4396 5028 return skb;
4397 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
4398} 5029}
4399 5030
4400int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_bitmap) 5031static struct sk_buff *
5032ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
4401{ 5033{
4402 struct wmi_pdev_pktlog_enable_cmd *cmd; 5034 struct wmi_pdev_pktlog_enable_cmd *cmd;
4403 struct sk_buff *skb; 5035 struct sk_buff *skb;
4404 5036
4405 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); 5037 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
4406 if (!skb) 5038 if (!skb)
4407 return -ENOMEM; 5039 return ERR_PTR(-ENOMEM);
4408 5040
4409 ev_bitmap &= ATH10K_PKTLOG_ANY; 5041 ev_bitmap &= ATH10K_PKTLOG_ANY;
4410 ath10k_dbg(ar, ATH10K_DBG_WMI,
4411 "wmi enable pktlog filter:%x\n", ev_bitmap);
4412 5042
4413 cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data; 5043 cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
4414 cmd->ev_bitmap = __cpu_to_le32(ev_bitmap); 5044 cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
4415 return ath10k_wmi_cmd_send(ar, skb, 5045
4416 ar->wmi.cmd->pdev_pktlog_enable_cmdid); 5046 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
5047 ev_bitmap);
5048 return skb;
4417} 5049}
4418 5050
4419int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar) 5051static struct sk_buff *
5052ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
4420{ 5053{
4421 struct sk_buff *skb; 5054 struct sk_buff *skb;
4422 5055
4423 skb = ath10k_wmi_alloc_skb(ar, 0); 5056 skb = ath10k_wmi_alloc_skb(ar, 0);
4424 if (!skb) 5057 if (!skb)
4425 return -ENOMEM; 5058 return ERR_PTR(-ENOMEM);
4426 5059
4427 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n"); 5060 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
5061 return skb;
5062}
5063
5064static struct sk_buff *
5065ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
5066 u32 duration, u32 next_offset,
5067 u32 enabled)
5068{
5069 struct wmi_pdev_set_quiet_cmd *cmd;
5070 struct sk_buff *skb;
5071
5072 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
5073 if (!skb)
5074 return ERR_PTR(-ENOMEM);
5075
5076 cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
5077 cmd->period = __cpu_to_le32(period);
5078 cmd->duration = __cpu_to_le32(duration);
5079 cmd->next_start = __cpu_to_le32(next_offset);
5080 cmd->enabled = __cpu_to_le32(enabled);
4428 5081
4429 return ath10k_wmi_cmd_send(ar, skb, 5082 ath10k_dbg(ar, ATH10K_DBG_WMI,
4430 ar->wmi.cmd->pdev_pktlog_disable_cmdid); 5083 "wmi quiet param: period %u duration %u enabled %d\n",
5084 period, duration, enabled);
5085 return skb;
4431} 5086}
4432 5087
4433int ath10k_wmi_attach(struct ath10k *ar) 5088static struct sk_buff *
5089ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
5090 const u8 *mac)
4434{ 5091{
4435 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { 5092 struct wmi_addba_clear_resp_cmd *cmd;
4436 if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) 5093 struct sk_buff *skb;
4437 ar->wmi.cmd = &wmi_10_2_cmd_map; 5094
4438 else 5095 if (!mac)
4439 ar->wmi.cmd = &wmi_10x_cmd_map; 5096 return ERR_PTR(-EINVAL);
5097
5098 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
5099 if (!skb)
5100 return ERR_PTR(-ENOMEM);
5101
5102 cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
5103 cmd->vdev_id = __cpu_to_le32(vdev_id);
5104 ether_addr_copy(cmd->peer_macaddr.addr, mac);
5105
5106 ath10k_dbg(ar, ATH10K_DBG_WMI,
5107 "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
5108 vdev_id, mac);
5109 return skb;
5110}
5111
5112static struct sk_buff *
5113ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
5114 u32 tid, u32 buf_size)
5115{
5116 struct wmi_addba_send_cmd *cmd;
5117 struct sk_buff *skb;
5118
5119 if (!mac)
5120 return ERR_PTR(-EINVAL);
5121
5122 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
5123 if (!skb)
5124 return ERR_PTR(-ENOMEM);
5125
5126 cmd = (struct wmi_addba_send_cmd *)skb->data;
5127 cmd->vdev_id = __cpu_to_le32(vdev_id);
5128 ether_addr_copy(cmd->peer_macaddr.addr, mac);
5129 cmd->tid = __cpu_to_le32(tid);
5130 cmd->buffersize = __cpu_to_le32(buf_size);
5131
5132 ath10k_dbg(ar, ATH10K_DBG_WMI,
5133 "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
5134 vdev_id, mac, tid, buf_size);
5135 return skb;
5136}
5137
5138static struct sk_buff *
5139ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
5140 u32 tid, u32 status)
5141{
5142 struct wmi_addba_setresponse_cmd *cmd;
5143 struct sk_buff *skb;
5144
5145 if (!mac)
5146 return ERR_PTR(-EINVAL);
5147
5148 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
5149 if (!skb)
5150 return ERR_PTR(-ENOMEM);
5151
5152 cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
5153 cmd->vdev_id = __cpu_to_le32(vdev_id);
5154 ether_addr_copy(cmd->peer_macaddr.addr, mac);
5155 cmd->tid = __cpu_to_le32(tid);
5156 cmd->statuscode = __cpu_to_le32(status);
5157
5158 ath10k_dbg(ar, ATH10K_DBG_WMI,
5159 "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
5160 vdev_id, mac, tid, status);
5161 return skb;
5162}
5163
5164static struct sk_buff *
5165ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
5166 u32 tid, u32 initiator, u32 reason)
5167{
5168 struct wmi_delba_send_cmd *cmd;
5169 struct sk_buff *skb;
5170
5171 if (!mac)
5172 return ERR_PTR(-EINVAL);
5173
5174 skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
5175 if (!skb)
5176 return ERR_PTR(-ENOMEM);
5177
5178 cmd = (struct wmi_delba_send_cmd *)skb->data;
5179 cmd->vdev_id = __cpu_to_le32(vdev_id);
5180 ether_addr_copy(cmd->peer_macaddr.addr, mac);
5181 cmd->tid = __cpu_to_le32(tid);
5182 cmd->initiator = __cpu_to_le32(initiator);
5183 cmd->reasoncode = __cpu_to_le32(reason);
5184
5185 ath10k_dbg(ar, ATH10K_DBG_WMI,
5186 "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
5187 vdev_id, mac, tid, initiator, reason);
5188 return skb;
5189}
5190
5191static const struct wmi_ops wmi_ops = {
5192 .rx = ath10k_wmi_op_rx,
5193 .map_svc = wmi_main_svc_map,
5194
5195 .pull_scan = ath10k_wmi_op_pull_scan_ev,
5196 .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
5197 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
5198 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
5199 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
5200 .pull_swba = ath10k_wmi_op_pull_swba_ev,
5201 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
5202 .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
5203 .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
5204 .pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
5205
5206 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
5207 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
5208 .gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
5209 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
5210 .gen_init = ath10k_wmi_op_gen_init,
5211 .gen_start_scan = ath10k_wmi_op_gen_start_scan,
5212 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
5213 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
5214 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
5215 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
5216 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
5217 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
5218 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
5219 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
5220 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
5221 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
5222 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
5223 /* .gen_vdev_wmm_conf not implemented */
5224 .gen_peer_create = ath10k_wmi_op_gen_peer_create,
5225 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
5226 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
5227 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
5228 .gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
5229 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
5230 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
5231 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
5232 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
5233 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
5234 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
5235 .gen_request_stats = ath10k_wmi_op_gen_request_stats,
5236 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
5237 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
5238 .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
5239 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
5240 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
5241 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
5242 /* .gen_pdev_get_temperature not implemented */
5243 .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
5244 .gen_addba_send = ath10k_wmi_op_gen_addba_send,
5245 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
5246 .gen_delba_send = ath10k_wmi_op_gen_delba_send,
5247 /* .gen_bcn_tmpl not implemented */
5248 /* .gen_prb_tmpl not implemented */
5249 /* .gen_p2p_go_bcn_ie not implemented */
5250};
5251
5252static const struct wmi_ops wmi_10_1_ops = {
5253 .rx = ath10k_wmi_10_1_op_rx,
5254 .map_svc = wmi_10x_svc_map,
5255 .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
5256 .pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
5257 .gen_init = ath10k_wmi_10_1_op_gen_init,
5258 .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
5259 .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
5260 .gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
5261 /* .gen_pdev_get_temperature not implemented */
5262
5263 /* shared with main branch */
5264 .pull_scan = ath10k_wmi_op_pull_scan_ev,
5265 .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
5266 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
5267 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
5268 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
5269 .pull_swba = ath10k_wmi_op_pull_swba_ev,
5270 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
5271 .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
5272
5273 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
5274 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
5275 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
5276 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
5277 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
5278 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
5279 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
5280 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
5281 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
5282 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
5283 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
5284 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
5285 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
5286 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
5287 /* .gen_vdev_wmm_conf not implemented */
5288 .gen_peer_create = ath10k_wmi_op_gen_peer_create,
5289 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
5290 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
5291 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
5292 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
5293 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
5294 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
5295 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
5296 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
5297 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
5298 .gen_request_stats = ath10k_wmi_op_gen_request_stats,
5299 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
5300 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
5301 .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
5302 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
5303 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
5304 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
5305 .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
5306 .gen_addba_send = ath10k_wmi_op_gen_addba_send,
5307 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
5308 .gen_delba_send = ath10k_wmi_op_gen_delba_send,
5309 /* .gen_bcn_tmpl not implemented */
5310 /* .gen_prb_tmpl not implemented */
5311 /* .gen_p2p_go_bcn_ie not implemented */
5312};
5313
5314static const struct wmi_ops wmi_10_2_ops = {
5315 .rx = ath10k_wmi_10_2_op_rx,
5316 .pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
5317 .gen_init = ath10k_wmi_10_2_op_gen_init,
5318 .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
5319 /* .gen_pdev_get_temperature not implemented */
5320
5321 /* shared with 10.1 */
5322 .map_svc = wmi_10x_svc_map,
5323 .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
5324 .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
5325 .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
5326
5327 .pull_scan = ath10k_wmi_op_pull_scan_ev,
5328 .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
5329 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
5330 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
5331 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
5332 .pull_swba = ath10k_wmi_op_pull_swba_ev,
5333 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
5334 .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
5335
5336 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
5337 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
5338 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
5339 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
5340 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
5341 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
5342 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
5343 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
5344 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
5345 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
5346 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
5347 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
5348 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
5349 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
5350 /* .gen_vdev_wmm_conf not implemented */
5351 .gen_peer_create = ath10k_wmi_op_gen_peer_create,
5352 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
5353 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
5354 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
5355 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
5356 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
5357 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
5358 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
5359 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
5360 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
5361 .gen_request_stats = ath10k_wmi_op_gen_request_stats,
5362 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
5363 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
5364 .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
5365 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
5366 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
5367 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
5368 .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
5369 .gen_addba_send = ath10k_wmi_op_gen_addba_send,
5370 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
5371 .gen_delba_send = ath10k_wmi_op_gen_delba_send,
5372};
4440 5373
5374static const struct wmi_ops wmi_10_2_4_ops = {
5375 .rx = ath10k_wmi_10_2_op_rx,
5376 .pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
5377 .gen_init = ath10k_wmi_10_2_op_gen_init,
5378 .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
5379 .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
5380
5381 /* shared with 10.1 */
5382 .map_svc = wmi_10x_svc_map,
5383 .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
5384 .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
5385 .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
5386
5387 .pull_scan = ath10k_wmi_op_pull_scan_ev,
5388 .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
5389 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
5390 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
5391 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
5392 .pull_swba = ath10k_wmi_op_pull_swba_ev,
5393 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
5394 .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
5395
5396 .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
5397 .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
5398 .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
5399 .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
5400 .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
5401 .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
5402 .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
5403 .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
5404 .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
5405 .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
5406 .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
5407 .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
5408 .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
5409 .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
5410 .gen_peer_create = ath10k_wmi_op_gen_peer_create,
5411 .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
5412 .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
5413 .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
5414 .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
5415 .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
5416 .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
5417 .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
5418 .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
5419 .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
5420 .gen_request_stats = ath10k_wmi_op_gen_request_stats,
5421 .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
5422 .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
5423 .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
5424 .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
5425 .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
5426 .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
5427 .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
5428 .gen_addba_send = ath10k_wmi_op_gen_addba_send,
5429 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
5430 .gen_delba_send = ath10k_wmi_op_gen_delba_send,
5431 /* .gen_bcn_tmpl not implemented */
5432 /* .gen_prb_tmpl not implemented */
5433 /* .gen_p2p_go_bcn_ie not implemented */
5434};
5435
5436int ath10k_wmi_attach(struct ath10k *ar)
5437{
5438 switch (ar->wmi.op_version) {
5439 case ATH10K_FW_WMI_OP_VERSION_10_2_4:
5440 ar->wmi.cmd = &wmi_10_2_4_cmd_map;
5441 ar->wmi.ops = &wmi_10_2_4_ops;
5442 ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
5443 ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
5444 break;
5445 case ATH10K_FW_WMI_OP_VERSION_10_2:
5446 ar->wmi.cmd = &wmi_10_2_cmd_map;
5447 ar->wmi.ops = &wmi_10_2_ops;
4441 ar->wmi.vdev_param = &wmi_10x_vdev_param_map; 5448 ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
4442 ar->wmi.pdev_param = &wmi_10x_pdev_param_map; 5449 ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
4443 } else { 5450 break;
5451 case ATH10K_FW_WMI_OP_VERSION_10_1:
5452 ar->wmi.cmd = &wmi_10x_cmd_map;
5453 ar->wmi.ops = &wmi_10_1_ops;
5454 ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
5455 ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
5456 break;
5457 case ATH10K_FW_WMI_OP_VERSION_MAIN:
4444 ar->wmi.cmd = &wmi_cmd_map; 5458 ar->wmi.cmd = &wmi_cmd_map;
5459 ar->wmi.ops = &wmi_ops;
4445 ar->wmi.vdev_param = &wmi_vdev_param_map; 5460 ar->wmi.vdev_param = &wmi_vdev_param_map;
4446 ar->wmi.pdev_param = &wmi_pdev_param_map; 5461 ar->wmi.pdev_param = &wmi_pdev_param_map;
5462 break;
5463 case ATH10K_FW_WMI_OP_VERSION_TLV:
5464 ath10k_wmi_tlv_attach(ar);
5465 break;
5466 case ATH10K_FW_WMI_OP_VERSION_UNSET:
5467 case ATH10K_FW_WMI_OP_VERSION_MAX:
5468 ath10k_err(ar, "unsupported WMI op version: %d\n",
5469 ar->wmi.op_version);
5470 return -EINVAL;
4447 } 5471 }
4448 5472
4449 init_completion(&ar->wmi.service_ready); 5473 init_completion(&ar->wmi.service_ready);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 21391929d318..20ce3603e64b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -109,6 +109,45 @@ enum wmi_service {
109 WMI_SERVICE_BURST, 109 WMI_SERVICE_BURST,
110 WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, 110 WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT,
111 WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, 111 WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT,
112 WMI_SERVICE_ROAM_SCAN_OFFLOAD,
113 WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
114 WMI_SERVICE_EARLY_RX,
115 WMI_SERVICE_STA_SMPS,
116 WMI_SERVICE_FWTEST,
117 WMI_SERVICE_STA_WMMAC,
118 WMI_SERVICE_TDLS,
119 WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE,
120 WMI_SERVICE_ADAPTIVE_OCS,
121 WMI_SERVICE_BA_SSN_SUPPORT,
122 WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
123 WMI_SERVICE_WLAN_HB,
124 WMI_SERVICE_LTE_ANT_SHARE_SUPPORT,
125 WMI_SERVICE_BATCH_SCAN,
126 WMI_SERVICE_QPOWER,
127 WMI_SERVICE_PLMREQ,
128 WMI_SERVICE_THERMAL_MGMT,
129 WMI_SERVICE_RMC,
130 WMI_SERVICE_MHF_OFFLOAD,
131 WMI_SERVICE_COEX_SAR,
132 WMI_SERVICE_BCN_TXRATE_OVERRIDE,
133 WMI_SERVICE_NAN,
134 WMI_SERVICE_L1SS_STAT,
135 WMI_SERVICE_ESTIMATE_LINKSPEED,
136 WMI_SERVICE_OBSS_SCAN,
137 WMI_SERVICE_TDLS_OFFCHAN,
138 WMI_SERVICE_TDLS_UAPSD_BUFFER_STA,
139 WMI_SERVICE_TDLS_UAPSD_SLEEP_STA,
140 WMI_SERVICE_IBSS_PWRSAVE,
141 WMI_SERVICE_LPASS,
142 WMI_SERVICE_EXTSCAN,
143 WMI_SERVICE_D0WOW,
144 WMI_SERVICE_HSOFFLOAD,
145 WMI_SERVICE_ROAM_HO_OFFLOAD,
146 WMI_SERVICE_RX_FULL_REORDER,
147 WMI_SERVICE_DHCP_OFFLOAD,
148 WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
149 WMI_SERVICE_MDNS_OFFLOAD,
150 WMI_SERVICE_SAP_AUTH_OFFLOAD,
112 151
113 /* keep last */ 152 /* keep last */
114 WMI_SERVICE_MAX, 153 WMI_SERVICE_MAX,
@@ -215,6 +254,45 @@ static inline char *wmi_service_name(int service_id)
215 SVCSTR(WMI_SERVICE_BURST); 254 SVCSTR(WMI_SERVICE_BURST);
216 SVCSTR(WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT); 255 SVCSTR(WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT);
217 SVCSTR(WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT); 256 SVCSTR(WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT);
257 SVCSTR(WMI_SERVICE_ROAM_SCAN_OFFLOAD);
258 SVCSTR(WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC);
259 SVCSTR(WMI_SERVICE_EARLY_RX);
260 SVCSTR(WMI_SERVICE_STA_SMPS);
261 SVCSTR(WMI_SERVICE_FWTEST);
262 SVCSTR(WMI_SERVICE_STA_WMMAC);
263 SVCSTR(WMI_SERVICE_TDLS);
264 SVCSTR(WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE);
265 SVCSTR(WMI_SERVICE_ADAPTIVE_OCS);
266 SVCSTR(WMI_SERVICE_BA_SSN_SUPPORT);
267 SVCSTR(WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE);
268 SVCSTR(WMI_SERVICE_WLAN_HB);
269 SVCSTR(WMI_SERVICE_LTE_ANT_SHARE_SUPPORT);
270 SVCSTR(WMI_SERVICE_BATCH_SCAN);
271 SVCSTR(WMI_SERVICE_QPOWER);
272 SVCSTR(WMI_SERVICE_PLMREQ);
273 SVCSTR(WMI_SERVICE_THERMAL_MGMT);
274 SVCSTR(WMI_SERVICE_RMC);
275 SVCSTR(WMI_SERVICE_MHF_OFFLOAD);
276 SVCSTR(WMI_SERVICE_COEX_SAR);
277 SVCSTR(WMI_SERVICE_BCN_TXRATE_OVERRIDE);
278 SVCSTR(WMI_SERVICE_NAN);
279 SVCSTR(WMI_SERVICE_L1SS_STAT);
280 SVCSTR(WMI_SERVICE_ESTIMATE_LINKSPEED);
281 SVCSTR(WMI_SERVICE_OBSS_SCAN);
282 SVCSTR(WMI_SERVICE_TDLS_OFFCHAN);
283 SVCSTR(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA);
284 SVCSTR(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA);
285 SVCSTR(WMI_SERVICE_IBSS_PWRSAVE);
286 SVCSTR(WMI_SERVICE_LPASS);
287 SVCSTR(WMI_SERVICE_EXTSCAN);
288 SVCSTR(WMI_SERVICE_D0WOW);
289 SVCSTR(WMI_SERVICE_HSOFFLOAD);
290 SVCSTR(WMI_SERVICE_ROAM_HO_OFFLOAD);
291 SVCSTR(WMI_SERVICE_RX_FULL_REORDER);
292 SVCSTR(WMI_SERVICE_DHCP_OFFLOAD);
293 SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
294 SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
295 SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
218 default: 296 default:
219 return NULL; 297 return NULL;
220 } 298 }
@@ -472,6 +550,8 @@ struct wmi_cmd_map {
472 u32 force_fw_hang_cmdid; 550 u32 force_fw_hang_cmdid;
473 u32 gpio_config_cmdid; 551 u32 gpio_config_cmdid;
474 u32 gpio_output_cmdid; 552 u32 gpio_output_cmdid;
553 u32 pdev_get_temperature_cmdid;
554 u32 vdev_set_wmm_params_cmdid;
475}; 555};
476 556
477/* 557/*
@@ -1076,6 +1156,11 @@ enum wmi_10_2_cmd_id {
1076 WMI_10_2_PDEV_SET_MIMOGAIN_TABLE_CMDID, 1156 WMI_10_2_PDEV_SET_MIMOGAIN_TABLE_CMDID,
1077 WMI_10_2_PDEV_RATEPWR_TABLE_CMDID, 1157 WMI_10_2_PDEV_RATEPWR_TABLE_CMDID,
1078 WMI_10_2_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID, 1158 WMI_10_2_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
1159 WMI_10_2_PDEV_GET_INFO,
1160 WMI_10_2_VDEV_GET_INFO,
1161 WMI_10_2_VDEV_ATF_REQUEST_CMDID,
1162 WMI_10_2_PEER_ATF_REQUEST_CMDID,
1163 WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
1079 WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1, 1164 WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
1080}; 1165};
1081 1166
@@ -1117,6 +1202,8 @@ enum wmi_10_2_event_id {
1117 WMI_10_2_MCAST_BUF_RELEASE_EVENTID, 1202 WMI_10_2_MCAST_BUF_RELEASE_EVENTID,
1118 WMI_10_2_MCAST_LIST_AGEOUT_EVENTID, 1203 WMI_10_2_MCAST_LIST_AGEOUT_EVENTID,
1119 WMI_10_2_WDS_PEER_EVENTID, 1204 WMI_10_2_WDS_PEER_EVENTID,
1205 WMI_10_2_PEER_STA_PS_STATECHG_EVENTID,
1206 WMI_10_2_PDEV_TEMPERATURE_EVENTID,
1120 WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1, 1207 WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
1121}; 1208};
1122 1209
@@ -1862,6 +1949,11 @@ struct wmi_resource_config_10x {
1862 __le32 max_frag_entries; 1949 __le32 max_frag_entries;
1863} __packed; 1950} __packed;
1864 1951
1952enum wmi_10_2_feature_mask {
1953 WMI_10_2_RX_BATCH_MODE = BIT(0),
1954 WMI_10_2_ATF_CONFIG = BIT(1),
1955};
1956
1865struct wmi_resource_config_10_2 { 1957struct wmi_resource_config_10_2 {
1866 struct wmi_resource_config_10x common; 1958 struct wmi_resource_config_10x common;
1867 __le32 max_peer_ext_stats; 1959 __le32 max_peer_ext_stats;
@@ -1870,7 +1962,7 @@ struct wmi_resource_config_10_2 {
1870 __le32 be_min_free; 1962 __le32 be_min_free;
1871 __le32 vi_min_free; 1963 __le32 vi_min_free;
1872 __le32 vo_min_free; 1964 __le32 vo_min_free;
1873 __le32 rx_batchmode; /* 0-disable, 1-enable */ 1965 __le32 feature_mask;
1874} __packed; 1966} __packed;
1875 1967
1876#define NUM_UNITS_IS_NUM_VDEVS 0x1 1968#define NUM_UNITS_IS_NUM_VDEVS 0x1
@@ -2505,6 +2597,7 @@ struct wmi_pdev_param_map {
2505 u32 fast_channel_reset; 2597 u32 fast_channel_reset;
2506 u32 burst_dur; 2598 u32 burst_dur;
2507 u32 burst_enable; 2599 u32 burst_enable;
2600 u32 cal_period;
2508}; 2601};
2509 2602
2510#define WMI_PDEV_PARAM_UNSUPPORTED 0 2603#define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -2715,6 +2808,9 @@ enum wmi_10x_pdev_param {
2715 WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_MODE, 2808 WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_MODE,
2716 WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_BUFFER, 2809 WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
2717 WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER, 2810 WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
2811 WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
2812 WMI_10X_PDEV_PARAM_RTS_FIXED_RATE,
2813 WMI_10X_PDEV_PARAM_CAL_PERIOD
2718}; 2814};
2719 2815
2720struct wmi_pdev_set_param_cmd { 2816struct wmi_pdev_set_param_cmd {
@@ -2722,6 +2818,9 @@ struct wmi_pdev_set_param_cmd {
2722 __le32 param_value; 2818 __le32 param_value;
2723} __packed; 2819} __packed;
2724 2820
2821/* valid period is 1 ~ 60000ms, unit in millisecond */
2822#define WMI_PDEV_PARAM_CAL_PERIOD_MAX 60000
2823
2725struct wmi_pdev_get_tpc_config_cmd { 2824struct wmi_pdev_get_tpc_config_cmd {
2726 /* parameter */ 2825 /* parameter */
2727 __le32 param; 2826 __le32 param;
@@ -2841,14 +2940,14 @@ struct wmi_wmm_params_arg {
2841 u32 no_ack; 2940 u32 no_ack;
2842}; 2941};
2843 2942
2844struct wmi_pdev_set_wmm_params_arg { 2943struct wmi_wmm_params_all_arg {
2845 struct wmi_wmm_params_arg ac_be; 2944 struct wmi_wmm_params_arg ac_be;
2846 struct wmi_wmm_params_arg ac_bk; 2945 struct wmi_wmm_params_arg ac_bk;
2847 struct wmi_wmm_params_arg ac_vi; 2946 struct wmi_wmm_params_arg ac_vi;
2848 struct wmi_wmm_params_arg ac_vo; 2947 struct wmi_wmm_params_arg ac_vo;
2849}; 2948};
2850 2949
2851struct wal_dbg_tx_stats { 2950struct wmi_pdev_stats_tx {
2852 /* Num HTT cookies queued to dispatch list */ 2951 /* Num HTT cookies queued to dispatch list */
2853 __le32 comp_queued; 2952 __le32 comp_queued;
2854 2953
@@ -2918,7 +3017,7 @@ struct wal_dbg_tx_stats {
2918 __le32 txop_ovf; 3017 __le32 txop_ovf;
2919} __packed; 3018} __packed;
2920 3019
2921struct wal_dbg_rx_stats { 3020struct wmi_pdev_stats_rx {
2922 /* Cnts any change in ring routing mid-ppdu */ 3021 /* Cnts any change in ring routing mid-ppdu */
2923 __le32 mid_ppdu_route_change; 3022 __le32 mid_ppdu_route_change;
2924 3023
@@ -2952,17 +3051,11 @@ struct wal_dbg_rx_stats {
2952 __le32 mpdu_errs; 3051 __le32 mpdu_errs;
2953} __packed; 3052} __packed;
2954 3053
2955struct wal_dbg_peer_stats { 3054struct wmi_pdev_stats_peer {
2956 /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */ 3055 /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
2957 __le32 dummy; 3056 __le32 dummy;
2958} __packed; 3057} __packed;
2959 3058
2960struct wal_dbg_stats {
2961 struct wal_dbg_tx_stats tx;
2962 struct wal_dbg_rx_stats rx;
2963 struct wal_dbg_peer_stats peer;
2964} __packed;
2965
2966enum wmi_stats_id { 3059enum wmi_stats_id {
2967 WMI_REQUEST_PEER_STAT = 0x01, 3060 WMI_REQUEST_PEER_STAT = 0x01,
2968 WMI_REQUEST_AP_STAT = 0x02 3061 WMI_REQUEST_AP_STAT = 0x02
@@ -3029,23 +3122,38 @@ struct wmi_stats_event {
3029 u8 data[0]; 3122 u8 data[0];
3030} __packed; 3123} __packed;
3031 3124
3125struct wmi_10_2_stats_event {
3126 __le32 stats_id; /* %WMI_REQUEST_ */
3127 __le32 num_pdev_stats;
3128 __le32 num_pdev_ext_stats;
3129 __le32 num_vdev_stats;
3130 __le32 num_peer_stats;
3131 __le32 num_bcnflt_stats;
3132 u8 data[0];
3133} __packed;
3134
3032/* 3135/*
3033 * PDEV statistics 3136 * PDEV statistics
3034 * TODO: add all PDEV stats here 3137 * TODO: add all PDEV stats here
3035 */ 3138 */
3139struct wmi_pdev_stats_base {
3140 __le32 chan_nf;
3141 __le32 tx_frame_count;
3142 __le32 rx_frame_count;
3143 __le32 rx_clear_count;
3144 __le32 cycle_count;
3145 __le32 phy_err_count;
3146 __le32 chan_tx_pwr;
3147} __packed;
3148
3036struct wmi_pdev_stats { 3149struct wmi_pdev_stats {
3037 __le32 chan_nf; /* Channel noise floor */ 3150 struct wmi_pdev_stats_base base;
3038 __le32 tx_frame_count; /* TX frame count */ 3151 struct wmi_pdev_stats_tx tx;
3039 __le32 rx_frame_count; /* RX frame count */ 3152 struct wmi_pdev_stats_rx rx;
3040 __le32 rx_clear_count; /* rx clear count */ 3153 struct wmi_pdev_stats_peer peer;
3041 __le32 cycle_count; /* cycle count */
3042 __le32 phy_err_count; /* Phy error count */
3043 __le32 chan_tx_pwr; /* channel tx power */
3044 struct wal_dbg_stats wal; /* WAL dbg stats */
3045} __packed; 3154} __packed;
3046 3155
3047struct wmi_10x_pdev_stats { 3156struct wmi_pdev_stats_extra {
3048 struct wmi_pdev_stats old;
3049 __le32 ack_rx_bad; 3157 __le32 ack_rx_bad;
3050 __le32 rts_bad; 3158 __le32 rts_bad;
3051 __le32 rts_good; 3159 __le32 rts_good;
@@ -3054,6 +3162,30 @@ struct wmi_10x_pdev_stats {
3054 __le32 mib_int_count; 3162 __le32 mib_int_count;
3055} __packed; 3163} __packed;
3056 3164
3165struct wmi_10x_pdev_stats {
3166 struct wmi_pdev_stats_base base;
3167 struct wmi_pdev_stats_tx tx;
3168 struct wmi_pdev_stats_rx rx;
3169 struct wmi_pdev_stats_peer peer;
3170 struct wmi_pdev_stats_extra extra;
3171} __packed;
3172
3173struct wmi_pdev_stats_mem {
3174 __le32 dram_free;
3175 __le32 iram_free;
3176} __packed;
3177
3178struct wmi_10_2_pdev_stats {
3179 struct wmi_pdev_stats_base base;
3180 struct wmi_pdev_stats_tx tx;
3181 __le32 mc_drop;
3182 struct wmi_pdev_stats_rx rx;
3183 __le32 pdev_rx_timeout;
3184 struct wmi_pdev_stats_mem mem;
3185 struct wmi_pdev_stats_peer peer;
3186 struct wmi_pdev_stats_extra extra;
3187} __packed;
3188
3057/* 3189/*
3058 * VDEV statistics 3190 * VDEV statistics
3059 * TODO: add all VDEV stats here 3191 * TODO: add all VDEV stats here
@@ -3077,6 +3209,32 @@ struct wmi_10x_peer_stats {
3077 __le32 peer_rx_rate; 3209 __le32 peer_rx_rate;
3078} __packed; 3210} __packed;
3079 3211
3212struct wmi_10_2_peer_stats {
3213 struct wmi_peer_stats old;
3214 __le32 peer_rx_rate;
3215 __le32 current_per;
3216 __le32 retries;
3217 __le32 tx_rate_count;
3218 __le32 max_4ms_frame_len;
3219 __le32 total_sub_frames;
3220 __le32 tx_bytes;
3221 __le32 num_pkt_loss_overflow[4];
3222 __le32 num_pkt_loss_excess_retry[4];
3223} __packed;
3224
3225struct wmi_10_2_4_peer_stats {
3226 struct wmi_10_2_peer_stats common;
3227 __le32 unknown_value; /* FIXME: what is this word? */
3228} __packed;
3229
3230struct wmi_10_2_pdev_ext_stats {
3231 __le32 rx_rssi_comb;
3232 __le32 rx_rssi[4];
3233 __le32 rx_mcs[10];
3234 __le32 tx_mcs[10];
3235 __le32 ack_rssi;
3236} __packed;
3237
3080struct wmi_vdev_create_cmd { 3238struct wmi_vdev_create_cmd {
3081 __le32 vdev_id; 3239 __le32 vdev_id;
3082 __le32 vdev_type; 3240 __le32 vdev_type;
@@ -3930,6 +4088,13 @@ enum wmi_sta_ps_param_pspoll_count {
3930 * Values greater than 0 indicate the maximum numer of PS-Poll frames 4088 * Values greater than 0 indicate the maximum numer of PS-Poll frames
3931 * FW will send before waking up. 4089 * FW will send before waking up.
3932 */ 4090 */
4091
4092 /* When u-APSD is enabled the firmware will be very reluctant to exit
4093 * STA PS. This could result in very poor Rx performance with STA doing
4094 * PS-Poll for each and every buffered frame. This value is a bit
4095 * arbitrary.
4096 */
4097 WMI_STA_PS_PSPOLL_COUNT_UAPSD = 3,
3933}; 4098};
3934 4099
3935/* 4100/*
@@ -3955,6 +4120,30 @@ enum wmi_sta_ps_param_uapsd {
3955 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7), 4120 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
3956}; 4121};
3957 4122
4123#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX
4124
4125struct wmi_sta_uapsd_auto_trig_param {
4126 __le32 wmm_ac;
4127 __le32 user_priority;
4128 __le32 service_interval;
4129 __le32 suspend_interval;
4130 __le32 delay_interval;
4131};
4132
4133struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
4134 __le32 vdev_id;
4135 struct wmi_mac_addr peer_macaddr;
4136 __le32 num_ac;
4137};
4138
4139struct wmi_sta_uapsd_auto_trig_arg {
4140 u32 wmm_ac;
4141 u32 user_priority;
4142 u32 service_interval;
4143 u32 suspend_interval;
4144 u32 delay_interval;
4145};
4146
3958enum wmi_sta_powersave_param { 4147enum wmi_sta_powersave_param {
3959 /* 4148 /*
3960 * Controls how frames are retrievd from AP while STA is sleeping 4149 * Controls how frames are retrievd from AP while STA is sleeping
@@ -4120,7 +4309,7 @@ struct wmi_bcn_info {
4120 4309
4121struct wmi_host_swba_event { 4310struct wmi_host_swba_event {
4122 __le32 vdev_map; 4311 __le32 vdev_map;
4123 struct wmi_bcn_info bcn_info[1]; 4312 struct wmi_bcn_info bcn_info[0];
4124} __packed; 4313} __packed;
4125 4314
4126#define WMI_MAX_AP_VDEV 16 4315#define WMI_MAX_AP_VDEV 16
@@ -4325,7 +4514,7 @@ struct wmi_peer_set_q_empty_callback_cmd {
4325#define WMI_PEER_SPATIAL_MUX 0x00200000 4514#define WMI_PEER_SPATIAL_MUX 0x00200000
4326#define WMI_PEER_VHT 0x02000000 4515#define WMI_PEER_VHT 0x02000000
4327#define WMI_PEER_80MHZ 0x04000000 4516#define WMI_PEER_80MHZ 0x04000000
4328#define WMI_PEER_PMF 0x08000000 4517#define WMI_PEER_VHT_2G 0x08000000
4329 4518
4330/* 4519/*
4331 * Peer rate capabilities. 4520 * Peer rate capabilities.
@@ -4476,6 +4665,11 @@ enum wmi_sta_keepalive_method {
4476 WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2, 4665 WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2,
4477}; 4666};
4478 4667
4668#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
4669
4670/* Firmware crashes if keepalive interval exceeds this limit */
4671#define WMI_STA_KEEPALIVE_INTERVAL_MAX_SECONDS 0xffff
4672
4479/* note: ip4 addresses are in network byte order, i.e. big endian */ 4673/* note: ip4 addresses are in network byte order, i.e. big endian */
4480struct wmi_sta_keepalive_arp_resp { 4674struct wmi_sta_keepalive_arp_resp {
4481 __be32 src_ip4_addr; 4675 __be32 src_ip4_addr;
@@ -4491,6 +4685,16 @@ struct wmi_sta_keepalive_cmd {
4491 struct wmi_sta_keepalive_arp_resp arp_resp; 4685 struct wmi_sta_keepalive_arp_resp arp_resp;
4492} __packed; 4686} __packed;
4493 4687
4688struct wmi_sta_keepalive_arg {
4689 u32 vdev_id;
4690 u32 enabled;
4691 u32 method;
4692 u32 interval;
4693 __be32 src_ip4_addr;
4694 __be32 dest_ip4_addr;
4695 const u8 dest_mac_addr[ETH_ALEN];
4696};
4697
4494enum wmi_force_fw_hang_type { 4698enum wmi_force_fw_hang_type {
4495 WMI_FORCE_FW_HANG_ASSERT = 1, 4699 WMI_FORCE_FW_HANG_ASSERT = 1,
4496 WMI_FORCE_FW_HANG_NO_DETECT, 4700 WMI_FORCE_FW_HANG_NO_DETECT,
@@ -4567,6 +4771,58 @@ struct wmi_dbglog_cfg_cmd {
4567 4771
4568#define WMI_MAX_MEM_REQS 16 4772#define WMI_MAX_MEM_REQS 16
4569 4773
4774struct wmi_scan_ev_arg {
4775 __le32 event_type; /* %WMI_SCAN_EVENT_ */
4776 __le32 reason; /* %WMI_SCAN_REASON_ */
4777 __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
4778 __le32 scan_req_id;
4779 __le32 scan_id;
4780 __le32 vdev_id;
4781};
4782
4783struct wmi_mgmt_rx_ev_arg {
4784 __le32 channel;
4785 __le32 snr;
4786 __le32 rate;
4787 __le32 phy_mode;
4788 __le32 buf_len;
4789 __le32 status; /* %WMI_RX_STATUS_ */
4790};
4791
4792struct wmi_ch_info_ev_arg {
4793 __le32 err_code;
4794 __le32 freq;
4795 __le32 cmd_flags;
4796 __le32 noise_floor;
4797 __le32 rx_clear_count;
4798 __le32 cycle_count;
4799};
4800
4801struct wmi_vdev_start_ev_arg {
4802 __le32 vdev_id;
4803 __le32 req_id;
4804 __le32 resp_type; /* %WMI_VDEV_RESP_ */
4805 __le32 status;
4806};
4807
4808struct wmi_peer_kick_ev_arg {
4809 const u8 *mac_addr;
4810};
4811
4812struct wmi_swba_ev_arg {
4813 __le32 vdev_map;
4814 const struct wmi_tim_info *tim_info[WMI_MAX_AP_VDEV];
4815 const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV];
4816};
4817
4818struct wmi_phyerr_ev_arg {
4819 __le32 num_phyerrs;
4820 __le32 tsf_l32;
4821 __le32 tsf_u32;
4822 __le32 buf_len;
4823 const struct wmi_phyerr *phyerrs;
4824};
4825
4570struct wmi_svc_rdy_ev_arg { 4826struct wmi_svc_rdy_ev_arg {
4571 __le32 min_tx_power; 4827 __le32 min_tx_power;
4572 __le32 max_tx_power; 4828 __le32 max_tx_power;
@@ -4574,6 +4830,7 @@ struct wmi_svc_rdy_ev_arg {
4574 __le32 vht_cap; 4830 __le32 vht_cap;
4575 __le32 sw_ver0; 4831 __le32 sw_ver0;
4576 __le32 sw_ver1; 4832 __le32 sw_ver1;
4833 __le32 fw_build;
4577 __le32 phy_capab; 4834 __le32 phy_capab;
4578 __le32 num_rf_chains; 4835 __le32 num_rf_chains;
4579 __le32 eeprom_rd; 4836 __le32 eeprom_rd;
@@ -4583,83 +4840,99 @@ struct wmi_svc_rdy_ev_arg {
4583 const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS]; 4840 const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
4584}; 4841};
4585 4842
4843struct wmi_rdy_ev_arg {
4844 __le32 sw_version;
4845 __le32 abi_version;
4846 __le32 status;
4847 const u8 *mac_addr;
4848};
4849
4850struct wmi_pdev_temperature_event {
4851 /* temperature value in Celcius degree */
4852 __le32 temperature;
4853} __packed;
4854
4586struct ath10k; 4855struct ath10k;
4587struct ath10k_vif; 4856struct ath10k_vif;
4588struct ath10k_fw_stats; 4857struct ath10k_fw_stats_pdev;
4858struct ath10k_fw_stats_peer;
4589 4859
4590int ath10k_wmi_attach(struct ath10k *ar); 4860int ath10k_wmi_attach(struct ath10k *ar);
4591void ath10k_wmi_detach(struct ath10k *ar); 4861void ath10k_wmi_detach(struct ath10k *ar);
4592int ath10k_wmi_wait_for_service_ready(struct ath10k *ar); 4862int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
4593int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar); 4863int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
4594 4864
4865struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
4595int ath10k_wmi_connect(struct ath10k *ar); 4866int ath10k_wmi_connect(struct ath10k *ar);
4596 4867
4597struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len); 4868struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
4598int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); 4869int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
4599 4870int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
4600int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt); 4871 u32 cmd_id);
4601int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
4602int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
4603 u16 rd5g, u16 ctl2g, u16 ctl5g,
4604 enum wmi_dfs_region dfs_reg);
4605int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
4606int ath10k_wmi_cmd_init(struct ath10k *ar);
4607int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
4608void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *); 4872void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
4609int ath10k_wmi_stop_scan(struct ath10k *ar, 4873
4610 const struct wmi_stop_scan_arg *arg); 4874void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
4611int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, 4875 struct ath10k_fw_stats_pdev *dst);
4612 enum wmi_vdev_type type, 4876void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
4613 enum wmi_vdev_subtype subtype, 4877 struct ath10k_fw_stats_pdev *dst);
4614 const u8 macaddr[ETH_ALEN]); 4878void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
4615int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id); 4879 struct ath10k_fw_stats_pdev *dst);
4616int ath10k_wmi_vdev_start(struct ath10k *ar, 4880void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
4617 const struct wmi_vdev_start_request_arg *); 4881 struct ath10k_fw_stats_pdev *dst);
4618int ath10k_wmi_vdev_restart(struct ath10k *ar, 4882void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
4619 const struct wmi_vdev_start_request_arg *); 4883 struct ath10k_fw_stats_peer *dst);
4620int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id); 4884void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
4621int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, 4885 struct wmi_host_mem_chunks *chunks);
4622 const u8 *bssid); 4886void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
4623int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id); 4887 const struct wmi_start_scan_arg *arg);
4624int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, 4888void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
4625 u32 param_id, u32 param_value); 4889 const struct wmi_wmm_params_arg *arg);
4626int ath10k_wmi_vdev_install_key(struct ath10k *ar, 4890void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
4627 const struct wmi_vdev_install_key_arg *arg); 4891 const struct wmi_channel_arg *arg);
4628int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar, 4892int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
4629 const struct wmi_vdev_spectral_conf_arg *arg); 4893
4630int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, 4894int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
4631 u32 enable); 4895int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
4632int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, 4896void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
4633 const u8 peer_addr[ETH_ALEN]); 4897void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
4634int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, 4898int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
4635 const u8 peer_addr[ETH_ALEN]); 4899void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb);
4636int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, 4900void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb);
4637 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap); 4901void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb);
4638int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, 4902void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb);
4639 const u8 *peer_addr, 4903void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb);
4640 enum wmi_peer_param param_id, u32 param_value); 4904void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb);
4641int ath10k_wmi_peer_assoc(struct ath10k *ar, 4905void ath10k_wmi_event_dfs(struct ath10k *ar,
4642 const struct wmi_peer_assoc_complete_arg *arg); 4906 const struct wmi_phyerr *phyerr, u64 tsf);
4643int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, 4907void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
4644 enum wmi_sta_ps_mode psmode); 4908 const struct wmi_phyerr *phyerr,
4645int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, 4909 u64 tsf);
4646 enum wmi_sta_powersave_param param_id, 4910void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb);
4647 u32 value); 4911void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb);
4648int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, 4912void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb);
4649 enum wmi_ap_ps_peer_param param_id, u32 value); 4913void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb);
4650int ath10k_wmi_scan_chan_list(struct ath10k *ar, 4914void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb);
4651 const struct wmi_scan_chan_list_arg *arg); 4915void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb);
4652int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif); 4916void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
4653int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, 4917 struct sk_buff *skb);
4654 const struct wmi_pdev_set_wmm_params_arg *arg); 4918void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
4655int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id); 4919 struct sk_buff *skb);
4656int ath10k_wmi_force_fw_hang(struct ath10k *ar, 4920void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb);
4657 enum wmi_force_fw_hang_type type, u32 delay_ms); 4921void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb);
4658int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb); 4922void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb);
4659int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable); 4923void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb);
4660int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, 4924void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb);
4661 struct ath10k_fw_stats *stats); 4925void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
4662int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_list); 4926 struct sk_buff *skb);
4663int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar); 4927void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb);
4928void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb);
4929void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb);
4930void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
4931 struct sk_buff *skb);
4932void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb);
4933void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb);
4934void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
4935void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
4936int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
4664 4937
4665#endif /* _WMI_H_ */ 4938#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 8f387cf67340..2ca88b593e4c 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -227,7 +227,6 @@ static struct platform_driver ath_ahb_driver = {
227 .remove = ath_ahb_remove, 227 .remove = ath_ahb_remove,
228 .driver = { 228 .driver = {
229 .name = "ar231x-wmac", 229 .name = "ar231x-wmac",
230 .owner = THIS_MODULE,
231 }, 230 },
232}; 231};
233 232
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 19eab2a69ad5..3b4a6463d87a 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -672,10 +672,10 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
672 spin_lock_bh(&common->cc_lock); 672 spin_lock_bh(&common->cc_lock);
673 ath_hw_cycle_counters_update(common); 673 ath_hw_cycle_counters_update(common);
674 if (cc->cycles > 0) { 674 if (cc->cycles > 0) {
675 ah->survey.channel_time += cc->cycles / div; 675 ah->survey.time += cc->cycles / div;
676 ah->survey.channel_time_busy += cc->rx_busy / div; 676 ah->survey.time_busy += cc->rx_busy / div;
677 ah->survey.channel_time_rx += cc->rx_frame / div; 677 ah->survey.time_rx += cc->rx_frame / div;
678 ah->survey.channel_time_tx += cc->tx_frame / div; 678 ah->survey.time_tx += cc->tx_frame / div;
679 } 679 }
680 memset(cc, 0, sizeof(*cc)); 680 memset(cc, 0, sizeof(*cc));
681 spin_unlock_bh(&common->cc_lock); 681 spin_unlock_bh(&common->cc_lock);
@@ -686,10 +686,10 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
686 survey->noise = ah->ah_noise_floor; 686 survey->noise = ah->ah_noise_floor;
687 survey->filled = SURVEY_INFO_NOISE_DBM | 687 survey->filled = SURVEY_INFO_NOISE_DBM |
688 SURVEY_INFO_IN_USE | 688 SURVEY_INFO_IN_USE |
689 SURVEY_INFO_CHANNEL_TIME | 689 SURVEY_INFO_TIME |
690 SURVEY_INFO_CHANNEL_TIME_BUSY | 690 SURVEY_INFO_TIME_BUSY |
691 SURVEY_INFO_CHANNEL_TIME_RX | 691 SURVEY_INFO_TIME_RX |
692 SURVEY_INFO_CHANNEL_TIME_TX; 692 SURVEY_INFO_TIME_TX;
693 693
694 return 0; 694 return 0;
695} 695}
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index c60d36aa13e2..bf29da5e90da 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -912,6 +912,7 @@ ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
912 pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE 912 pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
913 | (ah->ah_version == AR5K_AR5210 ? 913 | (ah->ah_version == AR5K_AR5210 ?
914 AR5K_STA_ID1_PWR_SV : 0); 914 AR5K_STA_ID1_PWR_SV : 0);
915 /* fall through */
915 case NL80211_IFTYPE_MONITOR: 916 case NL80211_IFTYPE_MONITOR:
916 pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE 917 pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
917 | (ah->ah_version == AR5K_AR5210 ? 918 | (ah->ah_version == AR5K_AR5210 ?
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index a3399c4f13a9..b9b651ea9851 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -478,7 +478,7 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
478 regval = ioread32(reg); 478 regval = ioread32(reg);
479 iowrite32(regval | val, reg); 479 iowrite32(regval | val, reg);
480 regval = ioread32(reg); 480 regval = ioread32(reg);
481 usleep_range(100, 150); 481 udelay(100); /* NB: should be atomic */
482 482
483 /* Bring BB/MAC out of reset */ 483 /* Bring BB/MAC out of reset */
484 iowrite32(regval & ~val, reg); 484 iowrite32(regval & ~val, reg);
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 7a5337877a0c..85da63a67faf 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1799,20 +1799,20 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
1799 1799
1800 if (vif->target_stats.rx_byte) { 1800 if (vif->target_stats.rx_byte) {
1801 sinfo->rx_bytes = vif->target_stats.rx_byte; 1801 sinfo->rx_bytes = vif->target_stats.rx_byte;
1802 sinfo->filled |= STATION_INFO_RX_BYTES64; 1802 sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
1803 sinfo->rx_packets = vif->target_stats.rx_pkt; 1803 sinfo->rx_packets = vif->target_stats.rx_pkt;
1804 sinfo->filled |= STATION_INFO_RX_PACKETS; 1804 sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
1805 } 1805 }
1806 1806
1807 if (vif->target_stats.tx_byte) { 1807 if (vif->target_stats.tx_byte) {
1808 sinfo->tx_bytes = vif->target_stats.tx_byte; 1808 sinfo->tx_bytes = vif->target_stats.tx_byte;
1809 sinfo->filled |= STATION_INFO_TX_BYTES64; 1809 sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
1810 sinfo->tx_packets = vif->target_stats.tx_pkt; 1810 sinfo->tx_packets = vif->target_stats.tx_pkt;
1811 sinfo->filled |= STATION_INFO_TX_PACKETS; 1811 sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
1812 } 1812 }
1813 1813
1814 sinfo->signal = vif->target_stats.cs_rssi; 1814 sinfo->signal = vif->target_stats.cs_rssi;
1815 sinfo->filled |= STATION_INFO_SIGNAL; 1815 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
1816 1816
1817 rate = vif->target_stats.tx_ucast_rate; 1817 rate = vif->target_stats.tx_ucast_rate;
1818 1818
@@ -1827,6 +1827,7 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
1827 } 1827 }
1828 1828
1829 sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS; 1829 sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
1830 sinfo->txrate.bw = RATE_INFO_BW_20;
1830 } else if (is_rate_ht40(rate, &mcs, &sgi)) { 1831 } else if (is_rate_ht40(rate, &mcs, &sgi)) {
1831 if (sgi) { 1832 if (sgi) {
1832 sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1833 sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
@@ -1835,7 +1836,7 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
1835 sinfo->txrate.mcs = mcs; 1836 sinfo->txrate.mcs = mcs;
1836 } 1837 }
1837 1838
1838 sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; 1839 sinfo->txrate.bw = RATE_INFO_BW_40;
1839 sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS; 1840 sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
1840 } else { 1841 } else {
1841 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 1842 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
@@ -1844,12 +1845,12 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
1844 return 0; 1845 return 0;
1845 } 1846 }
1846 1847
1847 sinfo->filled |= STATION_INFO_TX_BITRATE; 1848 sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
1848 1849
1849 if (test_bit(CONNECTED, &vif->flags) && 1850 if (test_bit(CONNECTED, &vif->flags) &&
1850 test_bit(DTIM_PERIOD_AVAIL, &vif->flags) && 1851 test_bit(DTIM_PERIOD_AVAIL, &vif->flags) &&
1851 vif->nw_type == INFRA_NETWORK) { 1852 vif->nw_type == INFRA_NETWORK) {
1852 sinfo->filled |= STATION_INFO_BSS_PARAM; 1853 sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
1853 sinfo->bss_param.flags = 0; 1854 sinfo->bss_param.flags = 0;
1854 sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period; 1855 sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period;
1855 sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int; 1856 sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int;
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 933aef025698..b42ba46b5030 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -488,7 +488,6 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
488 488
489 sinfo.assoc_req_ies = ies; 489 sinfo.assoc_req_ies = ies;
490 sinfo.assoc_req_ies_len = ies_len; 490 sinfo.assoc_req_ies_len = ies_len;
491 sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
492 491
493 cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL); 492 cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL);
494 493
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index e000c4c27881..bd4a1a655f42 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -43,6 +43,10 @@ static const struct platform_device_id ath9k_platform_id_table[] = {
43 .name = "qca953x_wmac", 43 .name = "qca953x_wmac",
44 .driver_data = AR9300_DEVID_AR953X, 44 .driver_data = AR9300_DEVID_AR953X,
45 }, 45 },
46 {
47 .name = "qca956x_wmac",
48 .driver_data = AR9300_DEVID_QCA956X,
49 },
46 {}, 50 {},
47}; 51};
48 52
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index ba502a2d199b..ca01d17d130f 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -259,7 +259,8 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
259 entry_cck->fir_step_level); 259 entry_cck->fir_step_level);
260 260
261 /* Skip MRC CCK for pre AR9003 families */ 261 /* Skip MRC CCK for pre AR9003 families */
262 if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) 262 if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah) ||
263 AR_SREV_9565(ah) || AR_SREV_9561(ah))
263 return; 264 return;
264 265
265 if (aniState->mrcCCK != entry_cck->mrc_cck_on) 266 if (aniState->mrcCCK != entry_cck->mrc_cck_on)
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 5829074208fa..f273427fdd29 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -22,6 +22,21 @@
22 22
23/* All code below is for AR5008, AR9001, AR9002 */ 23/* All code below is for AR5008, AR9001, AR9002 */
24 24
25#define AR5008_OFDM_RATES 8
26#define AR5008_HT_SS_RATES 8
27#define AR5008_HT_DS_RATES 8
28
29#define AR5008_HT20_SHIFT 16
30#define AR5008_HT40_SHIFT 24
31
32#define AR5008_11NA_OFDM_SHIFT 0
33#define AR5008_11NA_HT_SS_SHIFT 8
34#define AR5008_11NA_HT_DS_SHIFT 16
35
36#define AR5008_11NG_OFDM_SHIFT 4
37#define AR5008_11NG_HT_SS_SHIFT 12
38#define AR5008_11NG_HT_DS_SHIFT 20
39
25static const int firstep_table[] = 40static const int firstep_table[] =
26/* level: 0 1 2 3 4 5 6 7 8 */ 41/* level: 0 1 2 3 4 5 6 7 8 */
27 { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */ 42 { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
@@ -1235,6 +1250,71 @@ static void ar5008_hw_set_radar_conf(struct ath_hw *ah)
1235 conf->radar_inband = 8; 1250 conf->radar_inband = 8;
1236} 1251}
1237 1252
1253static void ar5008_hw_init_txpower_cck(struct ath_hw *ah, int16_t *rate_array)
1254{
1255#define CCK_DELTA(x) ((OLC_FOR_AR9280_20_LATER) ? max((x) - 2, 0) : (x))
1256 ah->tx_power[0] = CCK_DELTA(rate_array[rate1l]);
1257 ah->tx_power[1] = CCK_DELTA(min(rate_array[rate2l],
1258 rate_array[rate2s]));
1259 ah->tx_power[2] = CCK_DELTA(min(rate_array[rate5_5l],
1260 rate_array[rate5_5s]));
1261 ah->tx_power[3] = CCK_DELTA(min(rate_array[rate11l],
1262 rate_array[rate11s]));
1263#undef CCK_DELTA
1264}
1265
1266static void ar5008_hw_init_txpower_ofdm(struct ath_hw *ah, int16_t *rate_array,
1267 int offset)
1268{
1269 int i, idx = 0;
1270
1271 for (i = offset; i < offset + AR5008_OFDM_RATES; i++) {
1272 ah->tx_power[i] = rate_array[idx];
1273 idx++;
1274 }
1275}
1276
1277static void ar5008_hw_init_txpower_ht(struct ath_hw *ah, int16_t *rate_array,
1278 int ss_offset, int ds_offset,
1279 bool is_40, int ht40_delta)
1280{
1281 int i, mcs_idx = (is_40) ? AR5008_HT40_SHIFT : AR5008_HT20_SHIFT;
1282
1283 for (i = ss_offset; i < ss_offset + AR5008_HT_SS_RATES; i++) {
1284 ah->tx_power[i] = rate_array[mcs_idx] + ht40_delta;
1285 mcs_idx++;
1286 }
1287 memcpy(&ah->tx_power[ds_offset], &ah->tx_power[ss_offset],
1288 AR5008_HT_SS_RATES);
1289}
1290
1291void ar5008_hw_init_rate_txpower(struct ath_hw *ah, int16_t *rate_array,
1292 struct ath9k_channel *chan, int ht40_delta)
1293{
1294 if (IS_CHAN_5GHZ(chan)) {
1295 ar5008_hw_init_txpower_ofdm(ah, rate_array,
1296 AR5008_11NA_OFDM_SHIFT);
1297 if (IS_CHAN_HT20(chan) || IS_CHAN_HT40(chan)) {
1298 ar5008_hw_init_txpower_ht(ah, rate_array,
1299 AR5008_11NA_HT_SS_SHIFT,
1300 AR5008_11NA_HT_DS_SHIFT,
1301 IS_CHAN_HT40(chan),
1302 ht40_delta);
1303 }
1304 } else {
1305 ar5008_hw_init_txpower_cck(ah, rate_array);
1306 ar5008_hw_init_txpower_ofdm(ah, rate_array,
1307 AR5008_11NG_OFDM_SHIFT);
1308 if (IS_CHAN_HT20(chan) || IS_CHAN_HT40(chan)) {
1309 ar5008_hw_init_txpower_ht(ah, rate_array,
1310 AR5008_11NG_HT_SS_SHIFT,
1311 AR5008_11NG_HT_DS_SHIFT,
1312 IS_CHAN_HT40(chan),
1313 ht40_delta);
1314 }
1315 }
1316}
1317
1238int ar5008_hw_attach_phy_ops(struct ath_hw *ah) 1318int ar5008_hw_attach_phy_ops(struct ath_hw *ah)
1239{ 1319{
1240 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); 1320 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 06ab71db6e80..174442beb952 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1203,24 +1203,41 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
1203static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g) 1203static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
1204{ 1204{
1205 int offset[8] = {0}, total = 0, test; 1205 int offset[8] = {0}, total = 0, test;
1206 int agc_out, i; 1206 int agc_out, i, peak_detect_threshold;
1207 1207
1208 if (AR_SREV_9550(ah) || AR_SREV_9531(ah))
1209 peak_detect_threshold = 8;
1210 else
1211 peak_detect_threshold = 0;
1212
1213 /*
1214 * Turn off LNA/SW.
1215 */
1208 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain), 1216 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
1209 AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0x1); 1217 AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0x1);
1210 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain), 1218 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
1211 AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC, 0x0); 1219 AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC, 0x0);
1212 if (is_2g)
1213 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
1214 AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR, 0x0);
1215 else
1216 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
1217 AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR, 0x0);
1218 1220
1221 if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9330_11(ah)) {
1222 if (is_2g)
1223 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
1224 AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR, 0x0);
1225 else
1226 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
1227 AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR, 0x0);
1228 }
1229
1230 /*
1231 * Turn off RXON.
1232 */
1219 REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain), 1233 REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
1220 AR_PHY_65NM_RXTX2_RXON_OVR, 0x1); 1234 AR_PHY_65NM_RXTX2_RXON_OVR, 0x1);
1221 REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain), 1235 REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
1222 AR_PHY_65NM_RXTX2_RXON, 0x0); 1236 AR_PHY_65NM_RXTX2_RXON, 0x0);
1223 1237
1238 /*
1239 * Turn on AGC for cal.
1240 */
1224 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), 1241 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
1225 AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1); 1242 AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
1226 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), 1243 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
@@ -1228,16 +1245,19 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
1228 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), 1245 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
1229 AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0x1); 1246 AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0x1);
1230 1247
1231 if (AR_SREV_9330_11(ah)) { 1248 if (AR_SREV_9330_11(ah))
1232 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), 1249 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
1233 AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0); 1250 AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
1234 } else { 1251
1252 if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
1235 if (is_2g) 1253 if (is_2g)
1236 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), 1254 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
1237 AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, 0x0); 1255 AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
1256 peak_detect_threshold);
1238 else 1257 else
1239 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), 1258 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
1240 AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, 0x0); 1259 AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
1260 peak_detect_threshold);
1241 } 1261 }
1242 1262
1243 for (i = 6; i > 0; i--) { 1263 for (i = 6; i > 0; i--) {
@@ -1266,10 +1286,19 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
1266 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), 1286 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
1267 AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR, total); 1287 AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR, total);
1268 1288
1289 /*
1290 * Turn on LNA.
1291 */
1269 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain), 1292 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
1270 AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0); 1293 AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0);
1294 /*
1295 * Turn off RXON.
1296 */
1271 REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain), 1297 REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
1272 AR_PHY_65NM_RXTX2_RXON_OVR, 0); 1298 AR_PHY_65NM_RXTX2_RXON_OVR, 0);
1299 /*
1300 * Turn off peak detect calibration.
1301 */
1273 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), 1302 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
1274 AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0); 1303 AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0);
1275} 1304}
@@ -1611,8 +1640,14 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
1611 1640
1612skip_tx_iqcal: 1641skip_tx_iqcal:
1613 if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) { 1642 if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
1614 if (AR_SREV_9330_11(ah)) 1643 if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) {
1615 ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan)); 1644 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
1645 if (!(ah->rxchainmask & (1 << i)))
1646 continue;
1647 ar9003_hw_manual_peak_cal(ah, i,
1648 IS_CHAN_2GHZ(chan));
1649 }
1650 }
1616 1651
1617 /* 1652 /*
1618 * For non-AR9550 chips, we just trigger AGC calibration 1653 * For non-AR9550 chips, we just trigger AGC calibration
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 08225a0067c2..8b4561e8ce1a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3536,7 +3536,7 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
3536 int bias = ar9003_modal_header(ah, is2ghz)->xpaBiasLvl; 3536 int bias = ar9003_modal_header(ah, is2ghz)->xpaBiasLvl;
3537 3537
3538 if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah) || 3538 if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
3539 AR_SREV_9531(ah)) 3539 AR_SREV_9531(ah) || AR_SREV_9561(ah))
3540 REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias); 3540 REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
3541 else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah)) 3541 else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah))
3542 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias); 3542 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
@@ -3599,7 +3599,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3599 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { 3599 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
3600 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, 3600 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
3601 AR_SWITCH_TABLE_COM_AR9462_ALL, value); 3601 AR_SWITCH_TABLE_COM_AR9462_ALL, value);
3602 } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) { 3602 } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
3603 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, 3603 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
3604 AR_SWITCH_TABLE_COM_AR9550_ALL, value); 3604 AR_SWITCH_TABLE_COM_AR9550_ALL, value);
3605 } else 3605 } else
@@ -3929,9 +3929,13 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
3929 REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set); 3929 REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
3930 if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set)) 3930 if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
3931 return; 3931 return;
3932 } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { 3932 } else if (AR_SREV_9462(ah) || AR_SREV_9565(ah) ||
3933 AR_SREV_9561(ah)) {
3933 reg_val = le32_to_cpu(pBase->swreg); 3934 reg_val = le32_to_cpu(pBase->swreg);
3934 REG_WRITE(ah, AR_PHY_PMU1, reg_val); 3935 REG_WRITE(ah, AR_PHY_PMU1, reg_val);
3936
3937 if (AR_SREV_9561(ah))
3938 REG_WRITE(ah, AR_PHY_PMU2, 0x10200000);
3935 } else { 3939 } else {
3936 /* Internal regulator is ON. Write swreg register. */ 3940 /* Internal regulator is ON. Write swreg register. */
3937 reg_val = le32_to_cpu(pBase->swreg); 3941 reg_val = le32_to_cpu(pBase->swreg);
@@ -4034,7 +4038,8 @@ static void ar9003_hw_xpa_timing_control_apply(struct ath_hw *ah, bool is2ghz)
4034 if (!AR_SREV_9300(ah) && 4038 if (!AR_SREV_9300(ah) &&
4035 !AR_SREV_9340(ah) && 4039 !AR_SREV_9340(ah) &&
4036 !AR_SREV_9580(ah) && 4040 !AR_SREV_9580(ah) &&
4037 !AR_SREV_9531(ah)) 4041 !AR_SREV_9531(ah) &&
4042 !AR_SREV_9561(ah))
4038 return; 4043 return;
4039 4044
4040 xpa_ctl = ar9003_modal_header(ah, is2ghz)->txFrameToXpaOn; 4045 xpa_ctl = ar9003_modal_header(ah, is2ghz)->txFrameToXpaOn;
@@ -4812,7 +4817,7 @@ static void ar9003_hw_power_control_override(struct ath_hw *ah,
4812 } 4817 }
4813 4818
4814tempslope: 4819tempslope:
4815 if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) { 4820 if (AR_SREV_9550(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
4816 u8 txmask = (eep->baseEepHeader.txrxMask & 0xf0) >> 4; 4821 u8 txmask = (eep->baseEepHeader.txrxMask & 0xf0) >> 4;
4817 4822
4818 /* 4823 /*
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 06ad2172030e..4335ccbe7d7e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -29,6 +29,7 @@
29#include "ar9565_1p0_initvals.h" 29#include "ar9565_1p0_initvals.h"
30#include "ar9565_1p1_initvals.h" 30#include "ar9565_1p1_initvals.h"
31#include "ar953x_initvals.h" 31#include "ar953x_initvals.h"
32#include "ar956x_initvals.h"
32 33
33/* General hardware code for the AR9003 hadware family */ 34/* General hardware code for the AR9003 hadware family */
34 35
@@ -358,6 +359,40 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
358 359
359 INIT_INI_ARRAY(&ah->iniModesFastClock, 360 INIT_INI_ARRAY(&ah->iniModesFastClock,
360 qca953x_1p0_modes_fast_clock); 361 qca953x_1p0_modes_fast_clock);
362 } else if (AR_SREV_9561(ah)) {
363 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
364 qca956x_1p0_mac_core);
365 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
366 qca956x_1p0_mac_postamble);
367
368 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
369 qca956x_1p0_baseband_core);
370 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
371 qca956x_1p0_baseband_postamble);
372
373 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
374 qca956x_1p0_radio_core);
375 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
376 qca956x_1p0_radio_postamble);
377
378 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
379 qca956x_1p0_soc_preamble);
380 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
381 qca956x_1p0_soc_postamble);
382
383 INIT_INI_ARRAY(&ah->iniModesRxGain,
384 qca956x_1p0_common_wo_xlna_rx_gain_table);
385 INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
386 qca956x_1p0_common_wo_xlna_rx_gain_bounds);
387 INIT_INI_ARRAY(&ah->iniModesTxGain,
388 qca956x_1p0_modes_no_xpa_tx_gain_table);
389
390 INIT_INI_ARRAY(&ah->ini_dfs,
391 qca956x_1p0_baseband_postamble_dfs_channel);
392 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
393 qca956x_1p0_baseband_core_txfir_coeff_japan_2484);
394 INIT_INI_ARRAY(&ah->iniModesFastClock,
395 qca956x_1p0_modes_fast_clock);
361 } else if (AR_SREV_9580(ah)) { 396 } else if (AR_SREV_9580(ah)) {
362 /* mac */ 397 /* mac */
363 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 398 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -544,6 +579,9 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
544 else if (AR_SREV_9531_20(ah)) 579 else if (AR_SREV_9531_20(ah))
545 INIT_INI_ARRAY(&ah->iniModesTxGain, 580 INIT_INI_ARRAY(&ah->iniModesTxGain,
546 qca953x_2p0_modes_xpa_tx_gain_table); 581 qca953x_2p0_modes_xpa_tx_gain_table);
582 else if (AR_SREV_9561(ah))
583 INIT_INI_ARRAY(&ah->iniModesTxGain,
584 qca956x_1p0_modes_xpa_tx_gain_table);
547 else if (AR_SREV_9580(ah)) 585 else if (AR_SREV_9580(ah))
548 INIT_INI_ARRAY(&ah->iniModesTxGain, 586 INIT_INI_ARRAY(&ah->iniModesTxGain,
549 ar9580_1p0_lowest_ob_db_tx_gain_table); 587 ar9580_1p0_lowest_ob_db_tx_gain_table);
@@ -594,7 +632,10 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
594 else 632 else
595 INIT_INI_ARRAY(&ah->iniModesTxGain, 633 INIT_INI_ARRAY(&ah->iniModesTxGain,
596 qca953x_1p0_modes_no_xpa_tx_gain_table); 634 qca953x_1p0_modes_no_xpa_tx_gain_table);
597 } else if (AR_SREV_9462_21(ah)) 635 } else if (AR_SREV_9561(ah))
636 INIT_INI_ARRAY(&ah->iniModesTxGain,
637 qca956x_1p0_modes_no_xpa_tx_gain_table);
638 else if (AR_SREV_9462_21(ah))
598 INIT_INI_ARRAY(&ah->iniModesTxGain, 639 INIT_INI_ARRAY(&ah->iniModesTxGain,
599 ar9462_2p1_modes_high_ob_db_tx_gain); 640 ar9462_2p1_modes_high_ob_db_tx_gain);
600 else if (AR_SREV_9462_20(ah)) 641 else if (AR_SREV_9462_20(ah))
@@ -628,6 +669,9 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
628 else if (AR_SREV_9580(ah)) 669 else if (AR_SREV_9580(ah))
629 INIT_INI_ARRAY(&ah->iniModesTxGain, 670 INIT_INI_ARRAY(&ah->iniModesTxGain,
630 ar9580_1p0_low_ob_db_tx_gain_table); 671 ar9580_1p0_low_ob_db_tx_gain_table);
672 else if (AR_SREV_9561(ah))
673 INIT_INI_ARRAY(&ah->iniModesTxGain,
674 qca956x_1p0_modes_no_xpa_low_ob_db_tx_gain_table);
631 else if (AR_SREV_9565_11(ah)) 675 else if (AR_SREV_9565_11(ah))
632 INIT_INI_ARRAY(&ah->iniModesTxGain, 676 INIT_INI_ARRAY(&ah->iniModesTxGain,
633 ar9565_1p1_modes_low_ob_db_tx_gain_table); 677 ar9565_1p1_modes_low_ob_db_tx_gain_table);
@@ -699,6 +743,9 @@ static void ar9003_tx_gain_table_mode5(struct ath_hw *ah)
699 else if (AR_SREV_9580(ah)) 743 else if (AR_SREV_9580(ah))
700 INIT_INI_ARRAY(&ah->iniModesTxGain, 744 INIT_INI_ARRAY(&ah->iniModesTxGain,
701 ar9580_1p0_type5_tx_gain_table); 745 ar9580_1p0_type5_tx_gain_table);
746 else if (AR_SREV_9561(ah))
747 INIT_INI_ARRAY(&ah->iniModesTxGain,
748 qca956x_1p0_modes_no_xpa_green_tx_gain_table);
702 else if (AR_SREV_9300_22(ah)) 749 else if (AR_SREV_9300_22(ah))
703 INIT_INI_ARRAY(&ah->iniModesTxGain, 750 INIT_INI_ARRAY(&ah->iniModesTxGain,
704 ar9300Modes_type5_tx_gain_table_2p2); 751 ar9300Modes_type5_tx_gain_table_2p2);
@@ -770,6 +817,13 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
770 qca953x_1p0_common_rx_gain_table); 817 qca953x_1p0_common_rx_gain_table);
771 INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds, 818 INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
772 qca953x_1p0_common_rx_gain_bounds); 819 qca953x_1p0_common_rx_gain_bounds);
820 } else if (AR_SREV_9561(ah)) {
821 INIT_INI_ARRAY(&ah->iniModesRxGain,
822 qca956x_1p0_common_rx_gain_table);
823 INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
824 qca956x_1p0_common_rx_gain_bounds);
825 INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
826 qca956x_1p0_xlna_only);
773 } else if (AR_SREV_9580(ah)) 827 } else if (AR_SREV_9580(ah))
774 INIT_INI_ARRAY(&ah->iniModesRxGain, 828 INIT_INI_ARRAY(&ah->iniModesRxGain,
775 ar9580_1p0_rx_gain_table); 829 ar9580_1p0_rx_gain_table);
@@ -825,6 +879,11 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
825 qca953x_2p0_common_wo_xlna_rx_gain_table); 879 qca953x_2p0_common_wo_xlna_rx_gain_table);
826 INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds, 880 INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
827 qca953x_2p0_common_wo_xlna_rx_gain_bounds); 881 qca953x_2p0_common_wo_xlna_rx_gain_bounds);
882 } else if (AR_SREV_9561(ah)) {
883 INIT_INI_ARRAY(&ah->iniModesRxGain,
884 qca956x_1p0_common_wo_xlna_rx_gain_table);
885 INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
886 qca956x_1p0_common_wo_xlna_rx_gain_bounds);
828 } else if (AR_SREV_9580(ah)) 887 } else if (AR_SREV_9580(ah))
829 INIT_INI_ARRAY(&ah->iniModesRxGain, 888 INIT_INI_ARRAY(&ah->iniModesRxGain,
830 ar9580_1p0_wo_xlna_rx_gain_table); 889 ar9580_1p0_wo_xlna_rx_gain_table);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index ae6cde273414..1ad66b76749b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -183,7 +183,8 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
183 } else { 183 } else {
184 channelSel = CHANSEL_2G(freq) >> 1; 184 channelSel = CHANSEL_2G(freq) >> 1;
185 } 185 }
186 } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) { 186 } else if (AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
187 AR_SREV_9561(ah)) {
187 if (ah->is_clk_25mhz) 188 if (ah->is_clk_25mhz)
188 div = 75; 189 div = 75;
189 else 190 else
@@ -198,7 +199,8 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
198 /* Set to 2G mode */ 199 /* Set to 2G mode */
199 bMode = 1; 200 bMode = 1;
200 } else { 201 } else {
201 if ((AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) && 202 if ((AR_SREV_9340(ah) || AR_SREV_9550(ah) ||
203 AR_SREV_9531(ah) || AR_SREV_9561(ah)) &&
202 ah->is_clk_25mhz) { 204 ah->is_clk_25mhz) {
203 channelSel = freq / 75; 205 channelSel = freq / 75;
204 chan_frac = ((freq % 75) * 0x20000) / 75; 206 chan_frac = ((freq % 75) * 0x20000) / 75;
@@ -265,7 +267,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
265 */ 267 */
266 268
267 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) || 269 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
268 AR_SREV_9550(ah)) { 270 AR_SREV_9550(ah) || AR_SREV_9561(ah)) {
269 if (spur_fbin_ptr[0] == 0) /* No spur */ 271 if (spur_fbin_ptr[0] == 0) /* No spur */
270 return; 272 return;
271 max_spur_cnts = 5; 273 max_spur_cnts = 5;
@@ -292,7 +294,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
292 294
293 negative = 0; 295 negative = 0;
294 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) || 296 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
295 AR_SREV_9550(ah)) 297 AR_SREV_9550(ah) || AR_SREV_9561(ah))
296 cur_bb_spur = ath9k_hw_fbin2freq(spur_fbin_ptr[i], 298 cur_bb_spur = ath9k_hw_fbin2freq(spur_fbin_ptr[i],
297 IS_CHAN_2GHZ(chan)); 299 IS_CHAN_2GHZ(chan));
298 else 300 else
@@ -641,8 +643,10 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
641 (REG_READ(ah, AR_PHY_GEN_CTRL) & AR_PHY_GC_ENABLE_DAC_FIFO); 643 (REG_READ(ah, AR_PHY_GEN_CTRL) & AR_PHY_GC_ENABLE_DAC_FIFO);
642 644
643 /* Enable 11n HT, 20 MHz */ 645 /* Enable 11n HT, 20 MHz */
644 phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SINGLE_HT_LTF1 | 646 phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SHORT_GI_40 | enableDacFifo;
645 AR_PHY_GC_SHORT_GI_40 | enableDacFifo; 647
648 if (!AR_SREV_9561(ah))
649 phymode |= AR_PHY_GC_SINGLE_HT_LTF1;
646 650
647 /* Configure baseband for dynamic 20/40 operation */ 651 /* Configure baseband for dynamic 20/40 operation */
648 if (IS_CHAN_HT40(chan)) { 652 if (IS_CHAN_HT40(chan)) {
@@ -745,7 +749,8 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
745 else 749 else
746 ah->enabled_cals &= ~TX_CL_CAL; 750 ah->enabled_cals &= ~TX_CL_CAL;
747 751
748 if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) { 752 if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah) ||
753 AR_SREV_9561(ah)) {
749 if (ah->is_clk_25mhz) { 754 if (ah->is_clk_25mhz) {
750 REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); 755 REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
751 REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); 756 REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
@@ -812,6 +817,19 @@ static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah,
812 return ret; 817 return ret;
813} 818}
814 819
820static int ar9561_hw_get_modes_txgain_index(struct ath_hw *ah,
821 struct ath9k_channel *chan)
822{
823 if (IS_CHAN_2GHZ(chan)) {
824 if (IS_CHAN_HT40(chan))
825 return 1;
826 else
827 return 2;
828 }
829
830 return 0;
831}
832
815static void ar9003_doubler_fix(struct ath_hw *ah) 833static void ar9003_doubler_fix(struct ath_hw *ah)
816{ 834{
817 if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9550(ah)) { 835 if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9550(ah)) {
@@ -911,21 +929,29 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
911 REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna, 929 REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
912 modesIndex, regWrites); 930 modesIndex, regWrites);
913 } 931 }
932
933 if (AR_SREV_9561(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
934 REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
935 modesIndex, regWrites);
914 } 936 }
915 937
916 if (AR_SREV_9550(ah)) 938 if (AR_SREV_9550(ah) || AR_SREV_9561(ah))
917 REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex, 939 REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
918 regWrites); 940 regWrites);
919 941
920 /* 942 /*
921 * TXGAIN initvals. 943 * TXGAIN initvals.
922 */ 944 */
923 if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) { 945 if (AR_SREV_9550(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
924 int modes_txgain_index = 1; 946 int modes_txgain_index = 1;
925 947
926 if (AR_SREV_9550(ah)) 948 if (AR_SREV_9550(ah))
927 modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan); 949 modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan);
928 950
951 if (AR_SREV_9561(ah))
952 modes_txgain_index =
953 ar9561_hw_get_modes_txgain_index(ah, chan);
954
929 if (modes_txgain_index < 0) 955 if (modes_txgain_index < 0)
930 return -EINVAL; 956 return -EINVAL;
931 957
@@ -1989,7 +2015,8 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
1989 priv_ops->rf_set_freq = ar9003_hw_set_channel; 2015 priv_ops->rf_set_freq = ar9003_hw_set_channel;
1990 priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate; 2016 priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate;
1991 2017
1992 if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) 2018 if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
2019 AR_SREV_9561(ah))
1993 priv_ops->compute_pll_control = ar9003_hw_compute_pll_control_soc; 2020 priv_ops->compute_pll_control = ar9003_hw_compute_pll_control_soc;
1994 else 2021 else
1995 priv_ops->compute_pll_control = ar9003_hw_compute_pll_control; 2022 priv_ops->compute_pll_control = ar9003_hw_compute_pll_control;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index fd090b1f2d0f..c311b2bfdb00 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -454,7 +454,7 @@
454#define AR_PHY_GEN_CTRL (AR_SM_BASE + 0x4) 454#define AR_PHY_GEN_CTRL (AR_SM_BASE + 0x4)
455#define AR_PHY_MODE (AR_SM_BASE + 0x8) 455#define AR_PHY_MODE (AR_SM_BASE + 0x8)
456#define AR_PHY_ACTIVE (AR_SM_BASE + 0xc) 456#define AR_PHY_ACTIVE (AR_SM_BASE + 0xc)
457#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + 0x20) 457#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20))
458#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + 0x24) 458#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + 0x24)
459#define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28) 459#define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28)
460#define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c) 460#define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c)
@@ -506,7 +506,7 @@
506#define AR_PHY_TEST_CHAIN_SEL 0xC0000000 506#define AR_PHY_TEST_CHAIN_SEL 0xC0000000
507#define AR_PHY_TEST_CHAIN_SEL_S 30 507#define AR_PHY_TEST_CHAIN_SEL_S 30
508 508
509#define AR_PHY_TEST_CTL_STATUS (AR_SM_BASE + 0x164) 509#define AR_PHY_TEST_CTL_STATUS (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x160 : 0x164))
510#define AR_PHY_TEST_CTL_TSTDAC_EN 0x1 510#define AR_PHY_TEST_CTL_TSTDAC_EN 0x1
511#define AR_PHY_TEST_CTL_TSTDAC_EN_S 0 511#define AR_PHY_TEST_CTL_TSTDAC_EN_S 0
512#define AR_PHY_TEST_CTL_TX_OBS_SEL 0x1C 512#define AR_PHY_TEST_CTL_TX_OBS_SEL 0x1C
@@ -525,7 +525,7 @@
525 525
526#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c) 526#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c)
527 527
528#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170) 528#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x16c : 0x170))
529#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008 529#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008
530#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3 530#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3
531 531
@@ -536,7 +536,7 @@
536#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + 0x190) 536#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + 0x190)
537#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + 0x194) 537#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + 0x194)
538 538
539#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + 0x1a4) 539#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x198 : 0x1a4))
540#define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8) 540#define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8)
541#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac) 541#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
542#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0) 542#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
@@ -726,21 +726,24 @@
726 726
727#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \ 727#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
728 (AR_SREV_9462(ah) ? 0x16290 : 0x16284)) 728 (AR_SREV_9462(ah) ? 0x16290 : 0x16284))
729#define AR_CH0_TOP2_XPABIASLVL 0xf000 729#define AR_CH0_TOP2_XPABIASLVL (AR_SREV_9561(ah) ? 0x1e00 : 0xf000)
730#define AR_CH0_TOP2_XPABIASLVL_S 12 730#define AR_CH0_TOP2_XPABIASLVL_S 12
731 731
732#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \ 732#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
733 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : 0x16290)) 733 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : \
734 (AR_SREV_9561(ah) ? 0x162c0 : 0x16290)))
734#define AR_CH0_XTAL_CAPINDAC 0x7f000000 735#define AR_CH0_XTAL_CAPINDAC 0x7f000000
735#define AR_CH0_XTAL_CAPINDAC_S 24 736#define AR_CH0_XTAL_CAPINDAC_S 24
736#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000 737#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000
737#define AR_CH0_XTAL_CAPOUTDAC_S 17 738#define AR_CH0_XTAL_CAPOUTDAC_S 17
738 739
739#define AR_PHY_PMU1 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16340 : 0x16c40) 740#define AR_PHY_PMU1 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16340 : \
741 (AR_SREV_9561(ah) ? 0x16cc0 : 0x16c40))
740#define AR_PHY_PMU1_PWD 0x1 742#define AR_PHY_PMU1_PWD 0x1
741#define AR_PHY_PMU1_PWD_S 0 743#define AR_PHY_PMU1_PWD_S 0
742 744
743#define AR_PHY_PMU2 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16344 : 0x16c44) 745#define AR_PHY_PMU2 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16344 : \
746 (AR_SREV_9561(ah) ? 0x16cc4 : 0x16c44))
744#define AR_PHY_PMU2_PGM 0x00200000 747#define AR_PHY_PMU2_PGM 0x00200000
745#define AR_PHY_PMU2_PGM_S 21 748#define AR_PHY_PMU2_PGM_S 21
746 749
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_wow.c b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
index 81c88dd606dc..86bfc9604dca 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_wow.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
@@ -17,23 +17,9 @@
17#include <linux/export.h> 17#include <linux/export.h>
18#include "ath9k.h" 18#include "ath9k.h"
19#include "reg.h" 19#include "reg.h"
20#include "reg_wow.h"
20#include "hw-ops.h" 21#include "hw-ops.h"
21 22
22const char *ath9k_hw_wow_event_to_string(u32 wow_event)
23{
24 if (wow_event & AH_WOW_MAGIC_PATTERN_EN)
25 return "Magic pattern";
26 if (wow_event & AH_WOW_USER_PATTERN_EN)
27 return "User pattern";
28 if (wow_event & AH_WOW_LINK_CHANGE)
29 return "Link change";
30 if (wow_event & AH_WOW_BEACON_MISS)
31 return "Beacon miss";
32
33 return "unknown reason";
34}
35EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);
36
37static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah) 23static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
38{ 24{
39 struct ath_common *common = ath9k_hw_common(ah); 25 struct ath_common *common = ath9k_hw_common(ah);
@@ -49,6 +35,15 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
49 return; 35 return;
50 } 36 }
51 37
38 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
39 if (!REG_READ(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL))
40 REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
41 } else if (AR_SREV_9485(ah)){
42 if (!(REG_READ(ah, AR_NDP2_TIMER_MODE) &
43 AR_GEN_TIMERS2_MODE_ENABLE_MASK))
44 REG_CLR_BIT(ah, AR_DIRECT_CONNECT, AR_DC_TSF2_ENABLE);
45 }
46
52 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT); 47 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
53} 48}
54 49
@@ -67,11 +62,15 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
67 /* set the transmit buffer */ 62 /* set the transmit buffer */
68 ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16)); 63 ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
69 ctl[1] = 0; 64 ctl[1] = 0;
70 ctl[3] = 0xb; /* OFDM_6M hardware value for this rate */
71 ctl[4] = 0; 65 ctl[4] = 0;
72 ctl[7] = (ah->txchainmask) << 2; 66 ctl[7] = (ah->txchainmask) << 2;
73 ctl[2] = 0xf << 16; /* tx_tries 0 */ 67 ctl[2] = 0xf << 16; /* tx_tries 0 */
74 68
69 if (IS_CHAN_2GHZ(ah->curchan))
70 ctl[3] = 0x1b; /* CCK_1M */
71 else
72 ctl[3] = 0xb; /* OFDM_6M */
73
75 for (i = 0; i < KAL_NUM_DESC_WORDS; i++) 74 for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
76 REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]); 75 REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
77 76
@@ -103,21 +102,22 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
103 102
104} 103}
105 104
106void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern, 105int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
107 u8 *user_mask, int pattern_count, 106 u8 *user_mask, int pattern_count,
108 int pattern_len) 107 int pattern_len)
109{ 108{
110 int i; 109 int i;
111 u32 pattern_val, mask_val; 110 u32 pattern_val, mask_val;
112 u32 set, clr; 111 u32 set, clr;
113 112
114 /* FIXME: should check count by querying the hardware capability */ 113 if (pattern_count >= ah->wow.max_patterns)
115 if (pattern_count >= MAX_NUM_PATTERN) 114 return -ENOSPC;
116 return;
117 115
118 REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count)); 116 if (pattern_count < MAX_NUM_PATTERN_LEGACY)
117 REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
118 else
119 REG_SET_BIT(ah, AR_MAC_PCU_WOW4, BIT(pattern_count - 8));
119 120
120 /* set the registers for pattern */
121 for (i = 0; i < MAX_PATTERN_SIZE; i += 4) { 121 for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
122 memcpy(&pattern_val, user_pattern, 4); 122 memcpy(&pattern_val, user_pattern, 4);
123 REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i), 123 REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
@@ -125,49 +125,42 @@ void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
125 user_pattern += 4; 125 user_pattern += 4;
126 } 126 }
127 127
128 /* set the registers for mask */
129 for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) { 128 for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
130 memcpy(&mask_val, user_mask, 4); 129 memcpy(&mask_val, user_mask, 4);
131 REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val); 130 REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
132 user_mask += 4; 131 user_mask += 4;
133 } 132 }
134 133
135 /* set the pattern length to be matched 134 if (pattern_count < MAX_NUM_PATTERN_LEGACY)
136 * 135 ah->wow.wow_event_mask |=
137 * AR_WOW_LENGTH1_REG1 136 BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
138 * bit 31:24 pattern 0 length 137 else
139 * bit 23:16 pattern 1 length 138 ah->wow.wow_event_mask2 |=
140 * bit 15:8 pattern 2 length 139 BIT((pattern_count - 8) + AR_WOW_PAT_FOUND_SHIFT);
141 * bit 7:0 pattern 3 length
142 *
143 * AR_WOW_LENGTH1_REG2
144 * bit 31:24 pattern 4 length
145 * bit 23:16 pattern 5 length
146 * bit 15:8 pattern 6 length
147 * bit 7:0 pattern 7 length
148 *
149 * the below logic writes out the new
150 * pattern length for the corresponding
151 * pattern_count, while masking out the
152 * other fields
153 */
154
155 ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
156 140
157 if (pattern_count < 4) { 141 if (pattern_count < 4) {
158 /* Pattern 0-3 uses AR_WOW_LENGTH1 register */
159 set = (pattern_len & AR_WOW_LENGTH_MAX) << 142 set = (pattern_len & AR_WOW_LENGTH_MAX) <<
160 AR_WOW_LEN1_SHIFT(pattern_count); 143 AR_WOW_LEN1_SHIFT(pattern_count);
161 clr = AR_WOW_LENGTH1_MASK(pattern_count); 144 clr = AR_WOW_LENGTH1_MASK(pattern_count);
162 REG_RMW(ah, AR_WOW_LENGTH1, set, clr); 145 REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
163 } else { 146 } else if (pattern_count < 8) {
164 /* Pattern 4-7 uses AR_WOW_LENGTH2 register */
165 set = (pattern_len & AR_WOW_LENGTH_MAX) << 147 set = (pattern_len & AR_WOW_LENGTH_MAX) <<
166 AR_WOW_LEN2_SHIFT(pattern_count); 148 AR_WOW_LEN2_SHIFT(pattern_count);
167 clr = AR_WOW_LENGTH2_MASK(pattern_count); 149 clr = AR_WOW_LENGTH2_MASK(pattern_count);
168 REG_RMW(ah, AR_WOW_LENGTH2, set, clr); 150 REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
151 } else if (pattern_count < 12) {
152 set = (pattern_len & AR_WOW_LENGTH_MAX) <<
153 AR_WOW_LEN3_SHIFT(pattern_count);
154 clr = AR_WOW_LENGTH3_MASK(pattern_count);
155 REG_RMW(ah, AR_WOW_LENGTH3, set, clr);
156 } else if (pattern_count < MAX_NUM_PATTERN) {
157 set = (pattern_len & AR_WOW_LENGTH_MAX) <<
158 AR_WOW_LEN4_SHIFT(pattern_count);
159 clr = AR_WOW_LENGTH4_MASK(pattern_count);
160 REG_RMW(ah, AR_WOW_LENGTH4, set, clr);
169 } 161 }
170 162
163 return 0;
171} 164}
172EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern); 165EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);
173 166
@@ -189,7 +182,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
189 * register. This mask will clean it up. 182 * register. This mask will clean it up.
190 */ 183 */
191 184
192 val &= ah->wow_event_mask; 185 val &= ah->wow.wow_event_mask;
193 186
194 if (val) { 187 if (val) {
195 if (val & AR_WOW_MAGIC_PAT_FOUND) 188 if (val & AR_WOW_MAGIC_PAT_FOUND)
@@ -233,190 +226,192 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
233 if (ah->is_pciexpress) 226 if (ah->is_pciexpress)
234 ath9k_hw_configpcipowersave(ah, false); 227 ath9k_hw_configpcipowersave(ah, false);
235 228
236 ah->wow_event_mask = 0; 229 ah->wow.wow_event_mask = 0;
237 230
238 return wow_status; 231 return wow_status;
239} 232}
240EXPORT_SYMBOL(ath9k_hw_wow_wakeup); 233EXPORT_SYMBOL(ath9k_hw_wow_wakeup);
241 234
242void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable) 235static void ath9k_hw_wow_set_arwr_reg(struct ath_hw *ah)
243{ 236{
244 u32 wow_event_mask; 237 u32 wa_reg;
245 u32 set, clr;
246 238
247 /* 239 if (!ah->is_pciexpress)
248 * wow_event_mask is a mask to the AR_WOW_PATTERN register to 240 return;
249 * indicate which WoW events we have enabled. The WoW events
250 * are from the 'pattern_enable' in this function and
251 * 'pattern_count' of ath9k_hw_wow_apply_pattern()
252 */
253 wow_event_mask = ah->wow_event_mask;
254 241
255 /* 242 /*
256 * Untie Power-on-Reset from the PCI-E-Reset. When we are in 243 * We need to untie the internal POR (power-on-reset)
257 * WOW sleep, we do want the Reset from the PCI-E to disturb 244 * to the external PCI-E reset. We also need to tie
258 * our hw state 245 * the PCI-E Phy reset to the PCI-E reset.
259 */ 246 */
260 if (ah->is_pciexpress) { 247 wa_reg = REG_READ(ah, AR_WA);
261 /* 248 wa_reg &= ~AR_WA_UNTIE_RESET_EN;
262 * we need to untie the internal POR (power-on-reset) 249 wa_reg |= AR_WA_RESET_EN;
263 * to the external PCI-E reset. We also need to tie 250 wa_reg |= AR_WA_POR_SHORT;
264 * the PCI-E Phy reset to the PCI-E reset.
265 */
266 set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
267 clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
268 REG_RMW(ah, AR_WA, set, clr);
269 }
270 251
271 /* 252 REG_WRITE(ah, AR_WA, wa_reg);
272 * set the power states appropriately and enable PME 253}
273 */ 254
274 set = AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA | 255void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
275 AR_PMCTRL_AUX_PWR_DET | AR_PMCTRL_WOW_PME_CLR; 256{
257 u32 wow_event_mask;
258 u32 keep_alive, magic_pattern, host_pm_ctrl;
259
260 wow_event_mask = ah->wow.wow_event_mask;
276 261
277 /* 262 /*
278 * set and clear WOW_PME_CLEAR registers for the chip 263 * AR_PMCTRL_HOST_PME_EN - Override PME enable in configuration
264 * space and allow MAC to generate WoW anyway.
265 *
266 * AR_PMCTRL_PWR_PM_CTRL_ENA - ???
267 *
268 * AR_PMCTRL_AUX_PWR_DET - PCI core SYS_AUX_PWR_DET signal,
269 * needs to be set for WoW in PCI mode.
270 *
271 * AR_PMCTRL_WOW_PME_CLR - WoW Clear Signal going to the MAC.
272 *
273 * Set the power states appropriately and enable PME.
274 *
275 * Set and clear WOW_PME_CLEAR for the chip
279 * to generate next wow signal. 276 * to generate next wow signal.
280 */ 277 */
281 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set); 278 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_HOST_PME_EN |
282 clr = AR_PMCTRL_WOW_PME_CLR; 279 AR_PMCTRL_PWR_PM_CTRL_ENA |
283 REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr); 280 AR_PMCTRL_AUX_PWR_DET |
281 AR_PMCTRL_WOW_PME_CLR);
282 REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR);
284 283
285 /* 284 /*
286 * Setup for: 285 * Random Backoff.
287 * - beacon misses 286 *
288 * - magic pattern 287 * 31:28 in AR_WOW_PATTERN : Indicates the number of bits used in the
289 * - keep alive timeout 288 * contention window. For value N,
290 * - pattern matching 289 * the random backoff will be selected between
290 * 0 and (2 ^ N) - 1.
291 */ 291 */
292 REG_SET_BIT(ah, AR_WOW_PATTERN,
293 AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF));
292 294
293 /* 295 /*
294 * Program default values for pattern backoff, aifs/slot/KAL count, 296 * AIFS time, Slot time, Keep Alive count.
295 * beacon miss timeout, KAL timeout, etc. 297 */
298 REG_SET_BIT(ah, AR_WOW_COUNT, AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
299 AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
300 AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT));
301 /*
302 * Beacon timeout.
296 */ 303 */
297 set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
298 REG_SET_BIT(ah, AR_WOW_PATTERN, set);
299
300 set = AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
301 AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
302 AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT);
303 REG_SET_BIT(ah, AR_WOW_COUNT, set);
304
305 if (pattern_enable & AH_WOW_BEACON_MISS) 304 if (pattern_enable & AH_WOW_BEACON_MISS)
306 set = AR_WOW_BEACON_TIMO; 305 REG_WRITE(ah, AR_WOW_BCN_TIMO, AR_WOW_BEACON_TIMO);
307 /* We are not using beacon miss, program a large value */
308 else 306 else
309 set = AR_WOW_BEACON_TIMO_MAX; 307 REG_WRITE(ah, AR_WOW_BCN_TIMO, AR_WOW_BEACON_TIMO_MAX);
310
311 REG_WRITE(ah, AR_WOW_BCN_TIMO, set);
312 308
313 /* 309 /*
314 * Keep alive timo in ms except AR9280 310 * Keep alive timeout in ms.
315 */ 311 */
316 if (!pattern_enable) 312 if (!pattern_enable)
317 set = AR_WOW_KEEP_ALIVE_NEVER; 313 REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, AR_WOW_KEEP_ALIVE_NEVER);
318 else 314 else
319 set = KAL_TIMEOUT * 32; 315 REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, KAL_TIMEOUT * 32);
320
321 REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, set);
322 316
323 /* 317 /*
324 * Keep alive delay in us. based on 'power on clock', 318 * Keep alive delay in us.
325 * therefore in usec
326 */ 319 */
327 set = KAL_DELAY * 1000; 320 REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, KAL_DELAY * 1000);
328 REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, set);
329 321
330 /* 322 /*
331 * Create keep alive pattern to respond to beacons 323 * Create keep alive pattern to respond to beacons.
332 */ 324 */
333 ath9k_wow_create_keep_alive_pattern(ah); 325 ath9k_wow_create_keep_alive_pattern(ah);
334 326
335 /* 327 /*
336 * Configure MAC WoW Registers 328 * Configure keep alive register.
337 */ 329 */
338 set = 0; 330 keep_alive = REG_READ(ah, AR_WOW_KEEP_ALIVE);
331
339 /* Send keep alive timeouts anyway */ 332 /* Send keep alive timeouts anyway */
340 clr = AR_WOW_KEEP_ALIVE_AUTO_DIS; 333 keep_alive &= ~AR_WOW_KEEP_ALIVE_AUTO_DIS;
341 334
342 if (pattern_enable & AH_WOW_LINK_CHANGE) 335 if (pattern_enable & AH_WOW_LINK_CHANGE) {
336 keep_alive &= ~AR_WOW_KEEP_ALIVE_FAIL_DIS;
343 wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL; 337 wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
344 else 338 } else {
345 set = AR_WOW_KEEP_ALIVE_FAIL_DIS; 339 keep_alive |= AR_WOW_KEEP_ALIVE_FAIL_DIS;
340 }
346 341
347 set = AR_WOW_KEEP_ALIVE_FAIL_DIS; 342 REG_WRITE(ah, AR_WOW_KEEP_ALIVE, keep_alive);
348 REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);
349 343
350 /* 344 /*
351 * we are relying on a bmiss failure. ensure we have 345 * We are relying on a bmiss failure, ensure we have
352 * enough threshold to prevent false positives 346 * enough threshold to prevent false positives.
353 */ 347 */
354 REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR, 348 REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
355 AR_WOW_BMISSTHRESHOLD); 349 AR_WOW_BMISSTHRESHOLD);
356 350
357 set = 0;
358 clr = 0;
359
360 if (pattern_enable & AH_WOW_BEACON_MISS) { 351 if (pattern_enable & AH_WOW_BEACON_MISS) {
361 set = AR_WOW_BEACON_FAIL_EN;
362 wow_event_mask |= AR_WOW_BEACON_FAIL; 352 wow_event_mask |= AR_WOW_BEACON_FAIL;
353 REG_SET_BIT(ah, AR_WOW_BCN_EN, AR_WOW_BEACON_FAIL_EN);
363 } else { 354 } else {
364 clr = AR_WOW_BEACON_FAIL_EN; 355 REG_CLR_BIT(ah, AR_WOW_BCN_EN, AR_WOW_BEACON_FAIL_EN);
365 } 356 }
366 357
367 REG_RMW(ah, AR_WOW_BCN_EN, set, clr);
368
369 set = 0;
370 clr = 0;
371 /* 358 /*
372 * Enable the magic packet registers 359 * Enable the magic packet registers.
373 */ 360 */
361 magic_pattern = REG_READ(ah, AR_WOW_PATTERN);
362 magic_pattern |= AR_WOW_MAC_INTR_EN;
363
374 if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) { 364 if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
375 set = AR_WOW_MAGIC_EN; 365 magic_pattern |= AR_WOW_MAGIC_EN;
376 wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND; 366 wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
377 } else { 367 } else {
378 clr = AR_WOW_MAGIC_EN; 368 magic_pattern &= ~AR_WOW_MAGIC_EN;
379 } 369 }
380 set |= AR_WOW_MAC_INTR_EN;
381 REG_RMW(ah, AR_WOW_PATTERN, set, clr);
382 370
371 REG_WRITE(ah, AR_WOW_PATTERN, magic_pattern);
372
373 /*
374 * Enable pattern matching for packets which are less
375 * than 256 bytes.
376 */
383 REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B, 377 REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
384 AR_WOW_PATTERN_SUPPORTED); 378 AR_WOW_PATTERN_SUPPORTED);
385 379
386 /* 380 /*
387 * Set the power states appropriately and enable PME 381 * Set the power states appropriately and enable PME.
388 */ 382 */
389 clr = 0; 383 host_pm_ctrl = REG_READ(ah, AR_PCIE_PM_CTRL);
390 set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN | 384 host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3 |
391 AR_PMCTRL_PWR_PM_CTRL_ENA; 385 AR_PMCTRL_HOST_PME_EN |
386 AR_PMCTRL_PWR_PM_CTRL_ENA;
387 host_pm_ctrl &= ~AR_PCIE_PM_CTRL_ENA;
392 388
393 clr = AR_PCIE_PM_CTRL_ENA; 389 if (AR_SREV_9462(ah)) {
394 REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr); 390 /*
391 * This is needed to prevent the chip waking up
392 * the host within 3-4 seconds with certain
393 * platform/BIOS.
394 */
395 host_pm_ctrl &= ~AR_PMCTRL_PWR_STATE_D1D3;
396 host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3_REAL;
397 }
398
399 REG_WRITE(ah, AR_PCIE_PM_CTRL, host_pm_ctrl);
395 400
396 /* 401 /*
397 * this is needed to prevent the chip waking up 402 * Enable sequence number generation when asleep.
398 * the host within 3-4 seconds with certain
399 * platform/BIOS. The fix is to enable
400 * D1 & D3 to match original definition and
401 * also match the OTP value. Anyway this
402 * is more related to SW WOW.
403 */ 403 */
404 clr = AR_PMCTRL_PWR_STATE_D1D3; 404 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
405 REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
406 405
407 set = AR_PMCTRL_PWR_STATE_D1D3_REAL; 406 /* To bring down WOW power low margin */
408 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set); 407 REG_SET_BIT(ah, AR_PCIE_PHY_REG3, BIT(13));
409 408
410 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM); 409 ath9k_hw_wow_set_arwr_reg(ah);
411 410
412 /* to bring down WOW power low margin */
413 set = BIT(13);
414 REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
415 /* HW WoW */ 411 /* HW WoW */
416 clr = BIT(5); 412 REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, BIT(5));
417 REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
418 413
419 ath9k_hw_set_powermode_wow_sleep(ah); 414 ath9k_hw_set_powermode_wow_sleep(ah);
420 ah->wow_event_mask = wow_event_mask; 415 ah->wow.wow_event_mask = wow_event_mask;
421} 416}
422EXPORT_SYMBOL(ath9k_hw_wow_enable); 417EXPORT_SYMBOL(ath9k_hw_wow_enable);
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
index 159cc6fd2362..6fc0d07e5ec6 100644
--- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -358,7 +358,7 @@ static const u32 qca953x_1p0_baseband_postamble[][5] = {
358 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 358 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
359 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 359 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
360 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 360 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
361 {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10822, 0xcfa10822}, 361 {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10820, 0xcfa10820},
362 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, 362 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
363 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 363 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
364 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 364 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -378,7 +378,7 @@ static const u32 qca953x_1p0_baseband_postamble[][5] = {
378 {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010}, 378 {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
379 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, 379 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
380 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, 380 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
381 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 381 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
382 {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33}, 382 {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
383 {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982}, 383 {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
384 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, 384 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
index fd6a84ccd49e..148562addd38 100644
--- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -63,7 +63,7 @@ static const u32 ar955x_1p0_baseband_postamble[][5] = {
63 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 63 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
64 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 64 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
65 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 65 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
66 {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10822, 0xcfa10822}, 66 {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10820, 0xcfa10820},
67 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, 67 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
68 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 68 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
69 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 69 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -83,7 +83,7 @@ static const u32 ar955x_1p0_baseband_postamble[][5] = {
83 {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010}, 83 {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
84 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, 84 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
85 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, 85 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
86 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 86 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
87 {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33}, 87 {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
88 {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982}, 88 {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
89 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, 89 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
diff --git a/drivers/net/wireless/ath/ath9k/ar956x_initvals.h b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
new file mode 100644
index 000000000000..c3a47eaaf0c0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
@@ -0,0 +1,1046 @@
1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef INITVALS_956X_H
19#define INITVALS_956X_H
20
21#define qca956x_1p0_mac_core ar955x_1p0_mac_core
22
23#define qca956x_1p0_mac_postamble ar9331_1p1_mac_postamble
24
25#define qca956x_1p0_soc_preamble ar955x_1p0_soc_preamble
26
27#define qca956x_1p0_soc_postamble ar9300_2p2_soc_postamble
28
29#define qca956x_1p0_common_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
30
31#define qca956x_1p0_baseband_postamble_dfs_channel ar9300_2p2_baseband_postamble_dfs_channel
32
33#define qca956x_1p0_common_wo_xlna_rx_gain_bounds ar955x_1p0_common_wo_xlna_rx_gain_bounds
34
35#define qca956x_1p0_common_rx_gain_bounds ar955x_1p0_common_rx_gain_bounds
36
37#define qca956x_1p0_modes_fast_clock ar9462_2p0_modes_fast_clock
38
39static const u32 qca956x_1p0_baseband_core[][2] = {
40 /* Addr allmodes */
41 {0x00009800, 0xafe68e30},
42 {0x00009804, 0xfd14e000},
43 {0x00009808, 0x9c0a9f6b},
44 {0x0000980c, 0x04900000},
45 {0x00009814, 0x0280c00a},
46 {0x00009818, 0x00000000},
47 {0x0000981c, 0x00020028},
48 {0x00009834, 0x6400a190},
49 {0x00009838, 0x0108ecff},
50 {0x0000983c, 0x14000600},
51 {0x00009880, 0x201fff00},
52 {0x00009884, 0x00001042},
53 {0x000098a4, 0x00200400},
54 {0x000098b0, 0x32840cbf},
55 {0x000098bc, 0x00000002},
56 {0x000098d0, 0x004b6a8e},
57 {0x000098d4, 0x00000820},
58 {0x000098dc, 0x00000000},
59 {0x000098f0, 0x00000000},
60 {0x000098f4, 0x00000000},
61 {0x00009c04, 0xff55ff55},
62 {0x00009c08, 0x0320ff55},
63 {0x00009c0c, 0x00000000},
64 {0x00009c10, 0x00000000},
65 {0x00009c14, 0x00046384},
66 {0x00009c18, 0x05b6b440},
67 {0x00009c1c, 0x00b6b440},
68 {0x00009d00, 0xc080a333},
69 {0x00009d04, 0x40206c10},
70 {0x00009d08, 0x009c4060},
71 {0x00009d0c, 0x9883800a},
72 {0x00009d10, 0x01834061},
73 {0x00009d14, 0x00c0040b},
74 {0x00009d18, 0x00000000},
75 {0x00009e08, 0x0038230c},
76 {0x00009e24, 0x990bb514},
77 {0x00009e28, 0x0c6f0000},
78 {0x00009e30, 0x06336f77},
79 {0x00009e34, 0x6af6532f},
80 {0x00009e38, 0x0cc80c00},
81 {0x00009e40, 0x0d261820},
82 {0x00009e4c, 0x00001004},
83 {0x00009e50, 0x00ff03f1},
84 {0x00009fc0, 0x813e4789},
85 {0x00009fc4, 0x0001efb5},
86 {0x00009fcc, 0x40000014},
87 {0x00009fd0, 0x02993b93},
88 {0x0000a20c, 0x00000000},
89 {0x0000a218, 0x00000000},
90 {0x0000a21c, 0x00000000},
91 {0x0000a228, 0x10002310},
92 {0x0000a23c, 0x00000000},
93 {0x0000a244, 0x0c000000},
94 {0x0000a248, 0x00000140},
95 {0x0000a2a0, 0x00000007},
96 {0x0000a2c0, 0x00000007},
97 {0x0000a2c8, 0x00000000},
98 {0x0000a2d4, 0x00000000},
99 {0x0000a2ec, 0x00000000},
100 {0x0000a2f0, 0x00000000},
101 {0x0000a2f4, 0x00000000},
102 {0x0000a2f8, 0x00000000},
103 {0x0000a344, 0x00000000},
104 {0x0000a34c, 0x00000000},
105 {0x0000a350, 0x0000a000},
106 {0x0000a360, 0x00000000},
107 {0x0000a36c, 0x00000000},
108 {0x0000a384, 0x00000001},
109 {0x0000a388, 0x00000444},
110 {0x0000a38c, 0x00000000},
111 {0x0000a390, 0x210d0401},
112 {0x0000a394, 0xab9a7144},
113 {0x0000a398, 0x00000201},
114 {0x0000a39c, 0x42424848},
115 {0x0000a3a0, 0x3c466478},
116 {0x0000a3a4, 0x3a363600},
117 {0x0000a3a8, 0x0000003a},
118 {0x0000a3ac, 0x00000000},
119 {0x0000a3b0, 0x009011fe},
120 {0x0000a3b4, 0x00000034},
121 {0x0000a3b8, 0x00b3ec0a},
122 {0x0000a3bc, 0x00000036},
123 {0x0000a3c0, 0x20202020},
124 {0x0000a3c4, 0x22222220},
125 {0x0000a3c8, 0x20200020},
126 {0x0000a3cc, 0x20202020},
127 {0x0000a3d0, 0x20202020},
128 {0x0000a3d4, 0x20202020},
129 {0x0000a3d8, 0x20202020},
130 {0x0000a3dc, 0x20202020},
131 {0x0000a3e0, 0x20202020},
132 {0x0000a3e4, 0x20202020},
133 {0x0000a3e8, 0x20202020},
134 {0x0000a3ec, 0x20202020},
135 {0x0000a3f0, 0x00000000},
136 {0x0000a3f4, 0x00000000},
137 {0x0000a3f8, 0x0c9bd380},
138 {0x0000a3fc, 0x000f0f01},
139 {0x0000a400, 0x8fa91f01},
140 {0x0000a404, 0x00000000},
141 {0x0000a408, 0x0e79e5c6},
142 {0x0000a40c, 0x00820820},
143 {0x0000a414, 0x1ce739ce},
144 {0x0000a418, 0x2d0019ce},
145 {0x0000a41c, 0x1ce739ce},
146 {0x0000a420, 0x000001ce},
147 {0x0000a424, 0x1ce739ce},
148 {0x0000a428, 0x000001ce},
149 {0x0000a42c, 0x1ce739ce},
150 {0x0000a430, 0x1ce739ce},
151 {0x0000a434, 0x00000000},
152 {0x0000a438, 0x00001801},
153 {0x0000a43c, 0x00100000},
154 {0x0000a444, 0x00000000},
155 {0x0000a448, 0x05000080},
156 {0x0000a44c, 0x00000001},
157 {0x0000a450, 0x00010000},
158 {0x0000a454, 0x05000000},
159 {0x0000a458, 0x00000000},
160 {0x0000a644, 0xbfad9fee},
161 {0x0000a648, 0x0048660d},
162 {0x0000a64c, 0x00003c37},
163 {0x0000a670, 0x03020100},
164 {0x0000a674, 0x21200504},
165 {0x0000a678, 0x61602322},
166 {0x0000a67c, 0x65646362},
167 {0x0000a680, 0x6b6a6968},
168 {0x0000a684, 0xe2706d6c},
169 {0x0000a688, 0x000000e3},
170 {0x0000a690, 0x00000838},
171 {0x0000a7cc, 0x00000000},
172 {0x0000a7d0, 0x00000000},
173 {0x0000a7d4, 0x00000004},
174 {0x0000a7dc, 0x00000000},
175 {0x0000a8d0, 0x004b6a8e},
176 {0x0000a8d4, 0x00000820},
177 {0x0000a8dc, 0x00000000},
178 {0x0000a8f0, 0x00000000},
179 {0x0000a8f4, 0x00000000},
180 {0x0000b2d0, 0x00000080},
181 {0x0000b2d4, 0x00000000},
182 {0x0000b2ec, 0x00000000},
183 {0x0000b2f0, 0x00000000},
184 {0x0000b2f4, 0x00000000},
185 {0x0000b2f8, 0x00000000},
186 {0x0000b408, 0x0e79e5c0},
187 {0x0000b40c, 0x00820820},
188 {0x0000b420, 0x00000000},
189 {0x0000b8d0, 0x004b6a8e},
190 {0x0000b8d4, 0x00000820},
191 {0x0000b8dc, 0x00000000},
192 {0x0000b8f0, 0x00000000},
193 {0x0000b8f4, 0x00000000},
194 {0x0000c2d0, 0x00000080},
195 {0x0000c2d4, 0x00000000},
196 {0x0000c2ec, 0x00000000},
197 {0x0000c2f0, 0x00000000},
198 {0x0000c2f4, 0x00000000},
199 {0x0000c2f8, 0x00000000},
200 {0x0000c408, 0x0e79e5c0},
201 {0x0000c40c, 0x00820820},
202 {0x0000c420, 0x00000000},
203};
204
205static const u32 qca956x_1p0_baseband_postamble[][5] = {
206 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
207 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
208 {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
209 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac621f1, 0x5ac621f1},
210 {0x00009828, 0x06903081, 0x06903081, 0x07d43881, 0x07d43881},
211 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
212 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
213 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
214 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
215 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
216 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000de, 0x6c4000de},
217 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
218 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x337d605e, 0x337d5d5e},
219 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
220 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
221 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a6, 0x000003a6},
222 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
223 {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222},
224 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
225 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
226 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
227 {0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x045c0cc4, 0x045c0cc0},
228 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
229 {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
230 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
231 {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
232 {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
233 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
234 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
235 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
236 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01010e0e, 0x01010e0e},
237 {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
238 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x01000e0e, 0x01000e0e},
239 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
240 {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
241 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
242 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
243 {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
244 {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
245 {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
246 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
247 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
248 {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
249 {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
250 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
251 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
252 {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001a6, 0x000001a6},
253 {0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
254 {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
255 {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
256 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
257 {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
258 {0x0000be20, 0x000001b5, 0x000001b5, 0x000001a6, 0x000001a6},
259 {0x0000c284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
260};
261
262static const u32 qca956x_1p0_radio_core[][2] = {
263 /* Addr allmodes */
264 {0x00016000, 0x36db6db6},
265 {0x00016004, 0x6db6db40},
266 {0x00016008, 0x73f00000},
267 {0x0001600c, 0x00000000},
268 {0x00016040, 0x3f80fff8},
269 {0x0001604c, 0x000f0278},
270 {0x00016050, 0x8036db6c},
271 {0x00016054, 0x6db60000},
272 {0x00016080, 0x00080000},
273 {0x00016084, 0x0e48048c},
274 {0x00016088, 0x14214514},
275 {0x0001608c, 0x119f080a},
276 {0x00016090, 0x24926490},
277 {0x00016094, 0x00000000},
278 {0x000160a0, 0xc2108ffe},
279 {0x000160a4, 0x812fc370},
280 {0x000160a8, 0x423c8000},
281 {0x000160b4, 0x92480000},
282 {0x000160c0, 0x006db6d8},
283 {0x000160c4, 0x24b6db6c},
284 {0x000160c8, 0x6db6db6c},
285 {0x000160cc, 0x6db6fb7c},
286 {0x000160d0, 0x6db6da44},
287 {0x00016100, 0x07ff8001},
288 {0x00016108, 0x00080010},
289 {0x00016144, 0x01884080},
290 {0x00016148, 0x00008058},
291 {0x00016288, 0x001c6000},
292 {0x0001628c, 0x50000000},
293 {0x000162c0, 0x4b962100},
294 {0x000162c4, 0x00000480},
295 {0x000162c8, 0x04000144},
296 {0x00016380, 0x00000000},
297 {0x00016384, 0x00000000},
298 {0x00016388, 0x00800700},
299 {0x0001638c, 0x00800700},
300 {0x00016390, 0x00800700},
301 {0x00016394, 0x00000000},
302 {0x00016398, 0x00000000},
303 {0x0001639c, 0x00000000},
304 {0x000163a0, 0x00000001},
305 {0x000163a4, 0x00000001},
306 {0x000163a8, 0x00000000},
307 {0x000163ac, 0x00000000},
308 {0x000163b0, 0x00000000},
309 {0x000163b4, 0x00000000},
310 {0x000163b8, 0x00000000},
311 {0x000163bc, 0x00000000},
312 {0x000163c0, 0x000000a0},
313 {0x000163c4, 0x000c0000},
314 {0x000163c8, 0x14021402},
315 {0x000163cc, 0x00001402},
316 {0x000163d0, 0x00000000},
317 {0x000163d4, 0x00000000},
318 {0x00016400, 0x36db6db6},
319 {0x00016404, 0x6db6db40},
320 {0x00016408, 0x73f00000},
321 {0x0001640c, 0x00000000},
322 {0x00016440, 0x3f80fff8},
323 {0x0001644c, 0x000f0278},
324 {0x00016450, 0x8036db6c},
325 {0x00016454, 0x6db60000},
326 {0x00016500, 0x07ff8001},
327 {0x00016508, 0x00080010},
328 {0x00016544, 0x01884080},
329 {0x00016548, 0x00008058},
330 {0x00016780, 0x00000000},
331 {0x00016784, 0x00000000},
332 {0x00016788, 0x00800700},
333 {0x0001678c, 0x00800700},
334 {0x00016790, 0x00800700},
335 {0x00016794, 0x00000000},
336 {0x00016798, 0x00000000},
337 {0x0001679c, 0x00000000},
338 {0x000167a0, 0x00000001},
339 {0x000167a4, 0x00000001},
340 {0x000167a8, 0x00000000},
341 {0x000167ac, 0x00000000},
342 {0x000167b0, 0x00000000},
343 {0x000167b4, 0x00000000},
344 {0x000167b8, 0x00000000},
345 {0x000167bc, 0x00000000},
346 {0x000167c0, 0x000000a0},
347 {0x000167c4, 0x000c0000},
348 {0x000167c8, 0x14021402},
349 {0x000167cc, 0x00001402},
350 {0x000167d0, 0x00000000},
351 {0x000167d4, 0x00000000},
352 {0x00016800, 0x36db6db6},
353 {0x00016804, 0x6db6db40},
354 {0x00016808, 0x73f00000},
355 {0x0001680c, 0x00000000},
356 {0x00016840, 0x3f80fff8},
357 {0x0001684c, 0x000f0278},
358 {0x00016850, 0x8036db6c},
359 {0x00016854, 0x6db60000},
360 {0x00016900, 0x07ff8001},
361 {0x00016908, 0x00080010},
362 {0x00016944, 0x01884080},
363 {0x00016948, 0x00008058},
364 {0x00016b80, 0x00000000},
365 {0x00016b84, 0x00000000},
366 {0x00016b88, 0x00800700},
367 {0x00016b8c, 0x00800700},
368 {0x00016b90, 0x00800700},
369 {0x00016b94, 0x00000000},
370 {0x00016b98, 0x00000000},
371 {0x00016b9c, 0x00000000},
372 {0x00016ba0, 0x00000001},
373 {0x00016ba4, 0x00000001},
374 {0x00016ba8, 0x00000000},
375 {0x00016bac, 0x00000000},
376 {0x00016bb0, 0x00000000},
377 {0x00016bb4, 0x00000000},
378 {0x00016bb8, 0x00000000},
379 {0x00016bbc, 0x00000000},
380 {0x00016bc0, 0x000000a0},
381 {0x00016bc4, 0x000c0000},
382 {0x00016bc8, 0x14021402},
383 {0x00016bcc, 0x00001402},
384 {0x00016bd0, 0x00000000},
385 {0x00016bd4, 0x00000000},
386};
387
388static const u32 qca956x_1p0_radio_postamble[][5] = {
389 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
390 {0x00016098, 0xd2dd5554, 0xd2dd5554, 0xc4128f5c, 0xc4128f5c},
391 {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x0fd08f25, 0x0fd08f25},
392 {0x000160ac, 0xa4647c00, 0xa4647c00, 0x24646800, 0x24646800},
393 {0x000160b0, 0x01885f52, 0x01885f52, 0x00fe7f46, 0x00fe7f46},
394 {0x00016104, 0xb7a00000, 0xb7a00000, 0xfff80001, 0xfff80001},
395 {0x0001610c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
396 {0x00016140, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
397 {0x00016504, 0xb7a00000, 0xb7a00000, 0xfff80001, 0xfff80001},
398 {0x0001650c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
399 {0x00016540, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
400 {0x00016904, 0xb7a00000, 0xb7a00000, 0xfff80001, 0xfff80001},
401 {0x0001690c, 0xc0000000, 0xc0000000, 0x00000000, 0x00000000},
402 {0x00016940, 0x10804008, 0x10804008, 0x50804000, 0x50804000},
403};
404
405static const u32 qca956x_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
406 /* Addr allmodes */
407 {0x0000a38c, 0x00000000},
408 {0x0000a390, 0x6f7f0301},
409 {0x0000a394, 0xca9228ee},
410};
411
412static const u32 qca956x_1p0_modes_no_xpa_tx_gain_table[][3] = {
413 /* Addr 5G 2G */
414 {0x0000a2dc, 0xffa9ac94, 0xffa9ac94},
415 {0x0000a2e0, 0xff323118, 0xff323118},
416 {0x0000a2e4, 0xff3ffe00, 0xff3ffe00},
417 {0x0000a2e8, 0xffc00000, 0xffc00000},
418 {0x0000a39c, 0x42424242, 0x42424242},
419 {0x0000a3a4, 0x3a3e3e00, 0x3a3e3e00},
420 {0x0000a3b0, 0x00a01404, 0x00a01404},
421 {0x0000a3b4, 0x00000034, 0x00000034},
422 {0x0000a3b8, 0x00800408, 0x00800408},
423 {0x0000a3bc, 0x00000036, 0x00000036},
424 {0x0000a410, 0x000050dc, 0x000050dc},
425 {0x0000a500, 0x09000040, 0x09000040},
426 {0x0000a504, 0x0b000041, 0x0b000041},
427 {0x0000a508, 0x0d000042, 0x0d000042},
428 {0x0000a50c, 0x11000044, 0x11000044},
429 {0x0000a510, 0x15000046, 0x15000046},
430 {0x0000a514, 0x1d000440, 0x1d000440},
431 {0x0000a518, 0x1f000441, 0x1f000441},
432 {0x0000a51c, 0x23000443, 0x23000443},
433 {0x0000a520, 0x25000444, 0x25000444},
434 {0x0000a524, 0x280004e0, 0x280004e0},
435 {0x0000a528, 0x2c0004e2, 0x2c0004e2},
436 {0x0000a52c, 0x2e0004e3, 0x2e0004e3},
437 {0x0000a530, 0x300004e4, 0x300004e4},
438 {0x0000a534, 0x340004e6, 0x340004e6},
439 {0x0000a538, 0x37000ce0, 0x37000ce0},
440 {0x0000a53c, 0x3b000ce2, 0x3b000ce2},
441 {0x0000a540, 0x3d000ce3, 0x3d000ce3},
442 {0x0000a544, 0x3f000ce4, 0x3f000ce4},
443 {0x0000a548, 0x45001ee0, 0x45001ee0},
444 {0x0000a54c, 0x49001ee2, 0x49001ee2},
445 {0x0000a550, 0x4d001ee4, 0x4d001ee4},
446 {0x0000a554, 0x51001ee6, 0x51001ee6},
447 {0x0000a558, 0x55001eea, 0x55001eea},
448 {0x0000a55c, 0x59001eec, 0x59001eec},
449 {0x0000a560, 0x5d001ef0, 0x5d001ef0},
450 {0x0000a564, 0x5f001ef1, 0x5f001ef1},
451 {0x0000a568, 0x60001ef2, 0x60001ef2},
452 {0x0000a56c, 0x61001ef3, 0x61001ef3},
453 {0x0000a570, 0x62001ef4, 0x62001ef4},
454 {0x0000a574, 0x63001ef5, 0x63001ef5},
455 {0x0000a578, 0x64001ffc, 0x64001ffc},
456 {0x0000a57c, 0x64001ffc, 0x64001ffc},
457 {0x0000a600, 0x00000000, 0x00000000},
458 {0x0000a604, 0x00000000, 0x00000000},
459 {0x0000a608, 0x00000000, 0x00000000},
460 {0x0000a60c, 0x00000000, 0x00000000},
461 {0x0000a610, 0x00804000, 0x00804000},
462 {0x0000a614, 0x00804201, 0x00804201},
463 {0x0000a618, 0x00804201, 0x00804201},
464 {0x0000a61c, 0x00804201, 0x00804201},
465 {0x0000a620, 0x00804201, 0x00804201},
466 {0x0000a624, 0x00804201, 0x00804201},
467 {0x0000a628, 0x00804201, 0x00804201},
468 {0x0000a62c, 0x02808a02, 0x02808a02},
469 {0x0000a630, 0x0340cd03, 0x0340cd03},
470 {0x0000a634, 0x0340cd03, 0x0340cd03},
471 {0x0000a638, 0x0340cd03, 0x0340cd03},
472 {0x0000a63c, 0x05011404, 0x05011404},
473 {0x0000b2dc, 0xffa9ac94, 0xffa9ac94},
474 {0x0000b2e0, 0xff323118, 0xff323118},
475 {0x0000b2e4, 0xff3ffe00, 0xff3ffe00},
476 {0x0000b2e8, 0xffc00000, 0xffc00000},
477 {0x0000c2dc, 0xffa9ac94, 0xffa9ac94},
478 {0x0000c2e0, 0xff323118, 0xff323118},
479 {0x0000c2e4, 0xff3ffe00, 0xff3ffe00},
480 {0x0000c2e8, 0xffc00000, 0xffc00000},
481 {0x00016044, 0x049242db, 0x049242db},
482 {0x00016048, 0x64925a70, 0x64925a70},
483 {0x00016148, 0x00008050, 0x00008050},
484 {0x00016280, 0x41110005, 0x41110005},
485 {0x00016284, 0x453a6000, 0x453a6000},
486 {0x00016444, 0x049242db, 0x049242db},
487 {0x00016448, 0x6c925a70, 0x6c925a70},
488 {0x00016548, 0x00008050, 0x00008050},
489 {0x00016844, 0x049242db, 0x049242db},
490 {0x00016848, 0x6c925a70, 0x6c925a70},
491 {0x00016948, 0x00008050, 0x00008050},
492};
493
494static const u32 qca956x_1p0_modes_xpa_tx_gain_table[][3] = {
495 /* Addr 5G 2G */
496 {0x0000a2dc, 0xcc69ac94, 0xcc69ac94},
497 {0x0000a2e0, 0xf0b23118, 0xf0b23118},
498 {0x0000a2e4, 0xffffc000, 0xffffc000},
499 {0x0000a2e8, 0xc0000000, 0xc0000000},
500 {0x0000a410, 0x000050d2, 0x000050d2},
501 {0x0000a500, 0x0a000040, 0x0a000040},
502 {0x0000a504, 0x0c000041, 0x0c000041},
503 {0x0000a508, 0x0e000042, 0x0e000042},
504 {0x0000a50c, 0x12000044, 0x12000044},
505 {0x0000a510, 0x16000046, 0x16000046},
506 {0x0000a514, 0x1d000440, 0x1d000440},
507 {0x0000a518, 0x1f000441, 0x1f000441},
508 {0x0000a51c, 0x23000443, 0x23000443},
509 {0x0000a520, 0x25000444, 0x25000444},
510 {0x0000a524, 0x29000a40, 0x29000a40},
511 {0x0000a528, 0x2d000a42, 0x2d000a42},
512 {0x0000a52c, 0x2f000a43, 0x2f000a43},
513 {0x0000a530, 0x31000a44, 0x31000a44},
514 {0x0000a534, 0x35000a46, 0x35000a46},
515 {0x0000a538, 0x38000ce0, 0x38000ce0},
516 {0x0000a53c, 0x3c000ce2, 0x3c000ce2},
517 {0x0000a540, 0x3e000ce3, 0x3e000ce3},
518 {0x0000a544, 0x40000ce4, 0x40000ce4},
519 {0x0000a548, 0x46001ee0, 0x46001ee0},
520 {0x0000a54c, 0x4a001ee2, 0x4a001ee2},
521 {0x0000a550, 0x4e001ee4, 0x4e001ee4},
522 {0x0000a554, 0x52001ee6, 0x52001ee6},
523 {0x0000a558, 0x56001eea, 0x56001eea},
524 {0x0000a55c, 0x5a001eec, 0x5a001eec},
525 {0x0000a560, 0x5e001ef0, 0x5e001ef0},
526 {0x0000a564, 0x60001ef1, 0x60001ef1},
527 {0x0000a568, 0x61001ef2, 0x61001ef2},
528 {0x0000a56c, 0x62001ef3, 0x62001ef3},
529 {0x0000a570, 0x63001ef4, 0x63001ef4},
530 {0x0000a574, 0x64001ef5, 0x64001ef5},
531 {0x0000a578, 0x65001ffc, 0x65001ffc},
532 {0x0000a57c, 0x65001ffc, 0x65001ffc},
533 {0x0000a600, 0x00000000, 0x00000000},
534 {0x0000a604, 0x00000000, 0x00000000},
535 {0x0000a608, 0x00000000, 0x00000000},
536 {0x0000a60c, 0x00000000, 0x00000000},
537 {0x0000a610, 0x00000000, 0x00000000},
538 {0x0000a614, 0x00000000, 0x00000000},
539 {0x0000a618, 0x00000000, 0x00000000},
540 {0x0000a61c, 0x00804201, 0x00804201},
541 {0x0000a620, 0x00804201, 0x00804201},
542 {0x0000a624, 0x00804201, 0x00804201},
543 {0x0000a628, 0x00804201, 0x00804201},
544 {0x0000a62c, 0x02808a02, 0x02808a02},
545 {0x0000a630, 0x0340cd03, 0x0340cd03},
546 {0x0000a634, 0x0340cd03, 0x0340cd03},
547 {0x0000a638, 0x0340cd03, 0x0340cd03},
548 {0x0000a63c, 0x05011404, 0x05011404},
549 {0x0000b2dc, 0xcc69ac94, 0xcc69ac94},
550 {0x0000b2e0, 0xf0b23118, 0xf0b23118},
551 {0x0000b2e4, 0xffffc000, 0xffffc000},
552 {0x0000b2e8, 0xc0000000, 0xc0000000},
553 {0x0000c2dc, 0xcc69ac94, 0xcc69ac94},
554 {0x0000c2e0, 0xf0b23118, 0xf0b23118},
555 {0x0000c2e4, 0xffffc000, 0xffffc000},
556 {0x0000c2e8, 0xc0000000, 0xc0000000},
557 {0x00016044, 0x012492db, 0x012492db},
558 {0x00016048, 0x6c927a70, 0x6c927a70},
559 {0x00016050, 0x8036d36c, 0x8036d36c},
560 {0x00016280, 0x41110005, 0x41110005},
561 {0x00016284, 0x453a7e00, 0x453a7e00},
562 {0x00016444, 0x012492db, 0x012492db},
563 {0x00016448, 0x6c927a70, 0x6c927a70},
564 {0x00016450, 0x8036d36c, 0x8036d36c},
565 {0x00016844, 0x012492db, 0x012492db},
566 {0x00016848, 0x6c927a70, 0x6c927a70},
567 {0x00016850, 0x8036d36c, 0x8036d36c},
568};
569
570static const u32 qca956x_1p0_modes_no_xpa_low_ob_db_tx_gain_table[][3] = {
571 /* Addr 5G 2G */
572 {0x0000a2dc, 0xffa9ac94, 0xffa9ac94},
573 {0x0000a2e0, 0xff323118, 0xff323118},
574 {0x0000a2e4, 0xff3ffe00, 0xff3ffe00},
575 {0x0000a2e8, 0xffc00000, 0xffc00000},
576 {0x0000a39c, 0x42424242, 0x42424242},
577 {0x0000a3a4, 0x3a3e3e00, 0x3a3e3e00},
578 {0x0000a3b0, 0x00a01404, 0x00a01404},
579 {0x0000a3b4, 0x00000034, 0x00000034},
580 {0x0000a3b8, 0x00800408, 0x00800408},
581 {0x0000a3bc, 0x00000036, 0x00000036},
582 {0x0000a410, 0x000050dc, 0x000050dc},
583 {0x0000a414, 0x16b739ce, 0x16b739ce},
584 {0x0000a418, 0x2d00198b, 0x2d00198b},
585 {0x0000a41c, 0x16b5adce, 0x16b5adce},
586 {0x0000a420, 0x0000014a, 0x0000014a},
587 {0x0000a424, 0x14a525cc, 0x14a525cc},
588 {0x0000a428, 0x0000012a, 0x0000012a},
589 {0x0000a42c, 0x14a5294a, 0x14a5294a},
590 {0x0000a430, 0x1294a929, 0x1294a929},
591 {0x0000a500, 0x09000040, 0x09000040},
592 {0x0000a504, 0x0b000041, 0x0b000041},
593 {0x0000a508, 0x0d000042, 0x0d000042},
594 {0x0000a50c, 0x11000044, 0x11000044},
595 {0x0000a510, 0x15000046, 0x15000046},
596 {0x0000a514, 0x1d000440, 0x1d000440},
597 {0x0000a518, 0x1f000441, 0x1f000441},
598 {0x0000a51c, 0x23000443, 0x23000443},
599 {0x0000a520, 0x25000444, 0x25000444},
600 {0x0000a524, 0x280004e0, 0x280004e0},
601 {0x0000a528, 0x2c0004e2, 0x2c0004e2},
602 {0x0000a52c, 0x2e0004e3, 0x2e0004e3},
603 {0x0000a530, 0x300004e4, 0x300004e4},
604 {0x0000a534, 0x340004e6, 0x340004e6},
605 {0x0000a538, 0x37000ce0, 0x37000ce0},
606 {0x0000a53c, 0x3b000ce2, 0x3b000ce2},
607 {0x0000a540, 0x3d000ce3, 0x3d000ce3},
608 {0x0000a544, 0x3f000ce4, 0x3f000ce4},
609 {0x0000a548, 0x45001ee0, 0x45001ee0},
610 {0x0000a54c, 0x49001ee2, 0x49001ee2},
611 {0x0000a550, 0x4d001ee4, 0x4d001ee4},
612 {0x0000a554, 0x51001ee6, 0x51001ee6},
613 {0x0000a558, 0x55001eea, 0x55001eea},
614 {0x0000a55c, 0x59001eec, 0x59001eec},
615 {0x0000a560, 0x5d001ef0, 0x5d001ef0},
616 {0x0000a564, 0x5f001ef1, 0x5f001ef1},
617 {0x0000a568, 0x60001ef2, 0x60001ef2},
618 {0x0000a56c, 0x61001ef3, 0x61001ef3},
619 {0x0000a570, 0x62001ef4, 0x62001ef4},
620 {0x0000a574, 0x63001ef5, 0x63001ef5},
621 {0x0000a578, 0x64001ffc, 0x64001ffc},
622 {0x0000a57c, 0x64001ffc, 0x64001ffc},
623 {0x0000a600, 0x00000000, 0x00000000},
624 {0x0000a604, 0x00000000, 0x00000000},
625 {0x0000a608, 0x00000000, 0x00000000},
626 {0x0000a60c, 0x00000000, 0x00000000},
627 {0x0000a610, 0x00804000, 0x00804000},
628 {0x0000a614, 0x00804201, 0x00804201},
629 {0x0000a618, 0x00804201, 0x00804201},
630 {0x0000a61c, 0x00804201, 0x00804201},
631 {0x0000a620, 0x00804201, 0x00804201},
632 {0x0000a624, 0x00804201, 0x00804201},
633 {0x0000a628, 0x00804201, 0x00804201},
634 {0x0000a62c, 0x02808a02, 0x02808a02},
635 {0x0000a630, 0x0340cd03, 0x0340cd03},
636 {0x0000a634, 0x0340cd03, 0x0340cd03},
637 {0x0000a638, 0x0340cd03, 0x0340cd03},
638 {0x0000a63c, 0x05011404, 0x05011404},
639 {0x0000b2dc, 0xffa9ac94, 0xffa9ac94},
640 {0x0000b2e0, 0xff323118, 0xff323118},
641 {0x0000b2e4, 0xff3ffe00, 0xff3ffe00},
642 {0x0000b2e8, 0xffc00000, 0xffc00000},
643 {0x0000c2dc, 0xffa9ac94, 0xffa9ac94},
644 {0x0000c2e0, 0xff323118, 0xff323118},
645 {0x0000c2e4, 0xff3ffe00, 0xff3ffe00},
646 {0x0000c2e8, 0xffc00000, 0xffc00000},
647 {0x00016044, 0x046e42db, 0x046e42db},
648 {0x00016048, 0x64925a70, 0x64925a70},
649 {0x00016148, 0x00008050, 0x00008050},
650 {0x00016280, 0x41110005, 0x41110005},
651 {0x00016284, 0x453a6000, 0x453a6000},
652 {0x00016444, 0x046e42db, 0x046e42db},
653 {0x00016448, 0x6c925a70, 0x6c925a70},
654 {0x00016548, 0x00008050, 0x00008050},
655 {0x00016844, 0x046e42db, 0x046e42db},
656 {0x00016848, 0x6c925a70, 0x6c925a70},
657 {0x00016948, 0x00008050, 0x00008050},
658};
659
660static const u32 qca956x_1p0_modes_no_xpa_green_tx_gain_table[][3] = {
661 /* Addr 5G 2G */
662 {0x000098bc, 0x00000001, 0x00000001},
663 {0x0000a2dc, 0xd3555284, 0xd3555284},
664 {0x0000a2e0, 0x1c666318, 0x1c666318},
665 {0x0000a2e4, 0xe07bbc00, 0xe07bbc00},
666 {0x0000a2e8, 0xff800000, 0xff800000},
667 {0x0000a3a4, 0x3a3e3e00, 0x3a3e3e00},
668 {0x0000a410, 0x000050dc, 0x000050dc},
669 {0x0000a500, 0x02000040, 0x02000040},
670 {0x0000a504, 0x04000041, 0x04000041},
671 {0x0000a508, 0x06000042, 0x06000042},
672 {0x0000a50c, 0x0a000044, 0x0a000044},
673 {0x0000a510, 0x0c000045, 0x0c000045},
674 {0x0000a514, 0x13000440, 0x13000440},
675 {0x0000a518, 0x15000441, 0x15000441},
676 {0x0000a51c, 0x19000443, 0x19000443},
677 {0x0000a520, 0x1b000444, 0x1b000444},
678 {0x0000a524, 0x1e0004e0, 0x1e0004e0},
679 {0x0000a528, 0x220004e2, 0x220004e2},
680 {0x0000a52c, 0x240004e3, 0x240004e3},
681 {0x0000a530, 0x260004e4, 0x260004e4},
682 {0x0000a534, 0x2a0004e6, 0x2a0004e6},
683 {0x0000a538, 0x32000ce0, 0x32000ce0},
684 {0x0000a53c, 0x36000ce2, 0x36000ce2},
685 {0x0000a540, 0x3a000ce4, 0x3a000ce4},
686 {0x0000a544, 0x3e000ce6, 0x3e000ce6},
687 {0x0000a548, 0x45001ee0, 0x45001ee0},
688 {0x0000a54c, 0x49001ee2, 0x49001ee2},
689 {0x0000a550, 0x4d001ee4, 0x4d001ee4},
690 {0x0000a554, 0x51001ee6, 0x51001ee6},
691 {0x0000a558, 0x55001eea, 0x55001eea},
692 {0x0000a55c, 0x59001eec, 0x59001eec},
693 {0x0000a560, 0x5d001ef0, 0x5d001ef0},
694 {0x0000a564, 0x5f001ef1, 0x5f001ef1},
695 {0x0000a568, 0x60001ef2, 0x60001ef2},
696 {0x0000a56c, 0x61001ef3, 0x61001ef3},
697 {0x0000a570, 0x62001ef4, 0x62001ef4},
698 {0x0000a574, 0x63001ff5, 0x63001ff5},
699 {0x0000a578, 0x64001ffc, 0x64001ffc},
700 {0x0000a57c, 0x64001ffc, 0x64001ffc},
701 {0x0000a600, 0x00000000, 0x00000000},
702 {0x0000a604, 0x00000000, 0x00000000},
703 {0x0000a608, 0x00000000, 0x00000000},
704 {0x0000a60c, 0x00000000, 0x00000000},
705 {0x0000a610, 0x00804000, 0x00804000},
706 {0x0000a614, 0x00804201, 0x00804201},
707 {0x0000a618, 0x00804201, 0x00804201},
708 {0x0000a61c, 0x00804201, 0x00804201},
709 {0x0000a620, 0x00804201, 0x00804201},
710 {0x0000a624, 0x00804201, 0x00804201},
711 {0x0000a628, 0x00804201, 0x00804201},
712 {0x0000a62c, 0x02808a02, 0x02808a02},
713 {0x0000a630, 0x0340cd03, 0x0340cd03},
714 {0x0000a634, 0x0340cd03, 0x0340cd03},
715 {0x0000a638, 0x0340cd03, 0x0340cd03},
716 {0x0000a63c, 0x05011404, 0x05011404},
717 {0x0000b2dc, 0xd3555284, 0xd3555284},
718 {0x0000b2e0, 0x1c666318, 0x1c666318},
719 {0x0000b2e4, 0xe07bbc00, 0xe07bbc00},
720 {0x0000b2e8, 0xff800000, 0xff800000},
721 {0x0000c2dc, 0xd3555284, 0xd3555284},
722 {0x0000c2e0, 0x1c666318, 0x1c666318},
723 {0x0000c2e4, 0xe07bbc00, 0xe07bbc00},
724 {0x0000c2e8, 0xff800000, 0xff800000},
725 {0x00016044, 0x849242db, 0x849242db},
726 {0x00016048, 0x64925a70, 0x64925a70},
727 {0x00016280, 0x41110005, 0x41110005},
728 {0x00016284, 0x453a6000, 0x453a6000},
729 {0x00016444, 0x849242db, 0x849242db},
730 {0x00016448, 0x6c925a70, 0x6c925a70},
731 {0x00016844, 0x849242db, 0x849242db},
732 {0x00016848, 0x6c925a70, 0x6c925a70},
733 {0x0000a7f0, 0x800002cc, 0x800002cc},
734 {0x0000a7f4, 0x00000018, 0x00000018},
735 {0x0000a7f4, 0x00000018, 0x00000018},
736 {0x0000a7f4, 0x00000018, 0x00000018},
737 {0x0000a7f4, 0x00000018, 0x00000018},
738 {0x0000a7f4, 0x00000018, 0x00000018},
739 {0x0000a7f4, 0x00000018, 0x00000018},
740 {0x0000a7f4, 0x00000018, 0x00000018},
741 {0x0000a7f4, 0x00000018, 0x00000018},
742 {0x0000a7f4, 0x00000018, 0x00000018},
743 {0x0000a7f4, 0x00000018, 0x00000018},
744 {0x0000a7f4, 0x00000018, 0x00000018},
745 {0x0000a7f4, 0x00000018, 0x00000018},
746 {0x0000a7f4, 0x00000018, 0x00000018},
747 {0x0000a7f4, 0x00000018, 0x00000018},
748 {0x0000a7f4, 0x00000028, 0x00000028},
749 {0x0000a7f4, 0x00000028, 0x00000028},
750 {0x0000a7f4, 0x00000028, 0x00000028},
751 {0x0000a7f4, 0x00000028, 0x00000028},
752 {0x0000a7f4, 0x00000048, 0x00000048},
753 {0x0000a7f4, 0x00000048, 0x00000048},
754 {0x0000a7f4, 0x00000048, 0x00000048},
755 {0x0000a7f4, 0x00000048, 0x00000048},
756 {0x0000a7f4, 0x00000048, 0x00000048},
757 {0x0000a7f4, 0x00000048, 0x00000048},
758 {0x0000a7f4, 0x00000048, 0x00000048},
759 {0x0000a7f4, 0x00000048, 0x00000048},
760 {0x0000a7f4, 0x00000048, 0x00000048},
761 {0x0000a7f4, 0x00000048, 0x00000048},
762 {0x0000a7f4, 0x00000048, 0x00000048},
763 {0x0000a7f4, 0x00000048, 0x00000048},
764 {0x0000a7f4, 0x00000048, 0x00000048},
765 {0x0000a7f4, 0x00000048, 0x00000048},
766};
767
768static const u32 qca956x_1p0_common_rx_gain_table[][2] = {
769 /* Addr allmodes */
770 {0x0000a000, 0x00010000},
771 {0x0000a004, 0x00030002},
772 {0x0000a008, 0x00050004},
773 {0x0000a00c, 0x00810080},
774 {0x0000a010, 0x00830082},
775 {0x0000a014, 0x01810180},
776 {0x0000a018, 0x01830182},
777 {0x0000a01c, 0x01850184},
778 {0x0000a020, 0x01890188},
779 {0x0000a024, 0x018b018a},
780 {0x0000a028, 0x018d018c},
781 {0x0000a02c, 0x01910190},
782 {0x0000a030, 0x01930192},
783 {0x0000a034, 0x01950194},
784 {0x0000a038, 0x038a0196},
785 {0x0000a03c, 0x038c038b},
786 {0x0000a040, 0x0390038d},
787 {0x0000a044, 0x03920391},
788 {0x0000a048, 0x03940393},
789 {0x0000a04c, 0x03960395},
790 {0x0000a050, 0x00000000},
791 {0x0000a054, 0x00000000},
792 {0x0000a058, 0x00000000},
793 {0x0000a05c, 0x00000000},
794 {0x0000a060, 0x00000000},
795 {0x0000a064, 0x00000000},
796 {0x0000a068, 0x00000000},
797 {0x0000a06c, 0x00000000},
798 {0x0000a070, 0x00000000},
799 {0x0000a074, 0x00000000},
800 {0x0000a078, 0x00000000},
801 {0x0000a07c, 0x00000000},
802 {0x0000a080, 0x22222222},
803 {0x0000a084, 0x1d1d1d1d},
804 {0x0000a088, 0x1d1d1d1d},
805 {0x0000a08c, 0x1d1d1d1d},
806 {0x0000a090, 0x17171717},
807 {0x0000a094, 0x11111717},
808 {0x0000a098, 0x00030311},
809 {0x0000a09c, 0x00000000},
810 {0x0000a0a0, 0x00000000},
811 {0x0000a0a4, 0x00000000},
812 {0x0000a0a8, 0x00000000},
813 {0x0000a0ac, 0x00000000},
814 {0x0000a0b0, 0x00000000},
815 {0x0000a0b4, 0x00000000},
816 {0x0000a0b8, 0x00000000},
817 {0x0000a0bc, 0x00000000},
818 {0x0000a0c0, 0x001f0000},
819 {0x0000a0c4, 0x01000101},
820 {0x0000a0c8, 0x011e011f},
821 {0x0000a0cc, 0x011c011d},
822 {0x0000a0d0, 0x02030204},
823 {0x0000a0d4, 0x02010202},
824 {0x0000a0d8, 0x021f0200},
825 {0x0000a0dc, 0x0302021e},
826 {0x0000a0e0, 0x03000301},
827 {0x0000a0e4, 0x031e031f},
828 {0x0000a0e8, 0x0402031d},
829 {0x0000a0ec, 0x04000401},
830 {0x0000a0f0, 0x041e041f},
831 {0x0000a0f4, 0x0502041d},
832 {0x0000a0f8, 0x05000501},
833 {0x0000a0fc, 0x051e051f},
834 {0x0000a100, 0x06010602},
835 {0x0000a104, 0x061f0600},
836 {0x0000a108, 0x061d061e},
837 {0x0000a10c, 0x07020703},
838 {0x0000a110, 0x07000701},
839 {0x0000a114, 0x00000000},
840 {0x0000a118, 0x00000000},
841 {0x0000a11c, 0x00000000},
842 {0x0000a120, 0x00000000},
843 {0x0000a124, 0x00000000},
844 {0x0000a128, 0x00000000},
845 {0x0000a12c, 0x00000000},
846 {0x0000a130, 0x00000000},
847 {0x0000a134, 0x00000000},
848 {0x0000a138, 0x00000000},
849 {0x0000a13c, 0x00000000},
850 {0x0000a140, 0x001f0000},
851 {0x0000a144, 0x01000101},
852 {0x0000a148, 0x011e011f},
853 {0x0000a14c, 0x011c011d},
854 {0x0000a150, 0x02030204},
855 {0x0000a154, 0x02010202},
856 {0x0000a158, 0x021f0200},
857 {0x0000a15c, 0x0302021e},
858 {0x0000a160, 0x03000301},
859 {0x0000a164, 0x031e031f},
860 {0x0000a168, 0x0402031d},
861 {0x0000a16c, 0x04000401},
862 {0x0000a170, 0x041e041f},
863 {0x0000a174, 0x0502041d},
864 {0x0000a178, 0x05000501},
865 {0x0000a17c, 0x051e051f},
866 {0x0000a180, 0x06010602},
867 {0x0000a184, 0x061f0600},
868 {0x0000a188, 0x061d061e},
869 {0x0000a18c, 0x07020703},
870 {0x0000a190, 0x07000701},
871 {0x0000a194, 0x00000000},
872 {0x0000a198, 0x00000000},
873 {0x0000a19c, 0x00000000},
874 {0x0000a1a0, 0x00000000},
875 {0x0000a1a4, 0x00000000},
876 {0x0000a1a8, 0x00000000},
877 {0x0000a1ac, 0x00000000},
878 {0x0000a1b0, 0x00000000},
879 {0x0000a1b4, 0x00000000},
880 {0x0000a1b8, 0x00000000},
881 {0x0000a1bc, 0x00000000},
882 {0x0000a1c0, 0x00000000},
883 {0x0000a1c4, 0x00000000},
884 {0x0000a1c8, 0x00000000},
885 {0x0000a1cc, 0x00000000},
886 {0x0000a1d0, 0x00000000},
887 {0x0000a1d4, 0x00000000},
888 {0x0000a1d8, 0x00000000},
889 {0x0000a1dc, 0x00000000},
890 {0x0000a1e0, 0x00000000},
891 {0x0000a1e4, 0x00000000},
892 {0x0000a1e8, 0x00000000},
893 {0x0000a1ec, 0x00000000},
894 {0x0000a1f0, 0x00000396},
895 {0x0000a1f4, 0x00000396},
896 {0x0000a1f8, 0x00000396},
897 {0x0000a1fc, 0x00000196},
898 {0x0000b000, 0x00010000},
899 {0x0000b004, 0x00030002},
900 {0x0000b008, 0x00050004},
901 {0x0000b00c, 0x00810080},
902 {0x0000b010, 0x00830082},
903 {0x0000b014, 0x01810180},
904 {0x0000b018, 0x01830182},
905 {0x0000b01c, 0x01850184},
906 {0x0000b020, 0x02810280},
907 {0x0000b024, 0x02830282},
908 {0x0000b028, 0x02850284},
909 {0x0000b02c, 0x02890288},
910 {0x0000b030, 0x028b028a},
911 {0x0000b034, 0x0388028c},
912 {0x0000b038, 0x038a0389},
913 {0x0000b03c, 0x038c038b},
914 {0x0000b040, 0x0390038d},
915 {0x0000b044, 0x03920391},
916 {0x0000b048, 0x03940393},
917 {0x0000b04c, 0x03960395},
918 {0x0000b050, 0x00000000},
919 {0x0000b054, 0x00000000},
920 {0x0000b058, 0x00000000},
921 {0x0000b05c, 0x00000000},
922 {0x0000b060, 0x00000000},
923 {0x0000b064, 0x00000000},
924 {0x0000b068, 0x00000000},
925 {0x0000b06c, 0x00000000},
926 {0x0000b070, 0x00000000},
927 {0x0000b074, 0x00000000},
928 {0x0000b078, 0x00000000},
929 {0x0000b07c, 0x00000000},
930 {0x0000b080, 0x23232323},
931 {0x0000b084, 0x21232323},
932 {0x0000b088, 0x19191c1e},
933 {0x0000b08c, 0x12141417},
934 {0x0000b090, 0x07070e0e},
935 {0x0000b094, 0x03030305},
936 {0x0000b098, 0x00000003},
937 {0x0000b09c, 0x00000000},
938 {0x0000b0a0, 0x00000000},
939 {0x0000b0a4, 0x00000000},
940 {0x0000b0a8, 0x00000000},
941 {0x0000b0ac, 0x00000000},
942 {0x0000b0b0, 0x00000000},
943 {0x0000b0b4, 0x00000000},
944 {0x0000b0b8, 0x00000000},
945 {0x0000b0bc, 0x00000000},
946 {0x0000b0c0, 0x003f0020},
947 {0x0000b0c4, 0x00400041},
948 {0x0000b0c8, 0x0140005f},
949 {0x0000b0cc, 0x0160015f},
950 {0x0000b0d0, 0x017e017f},
951 {0x0000b0d4, 0x02410242},
952 {0x0000b0d8, 0x025f0240},
953 {0x0000b0dc, 0x027f0260},
954 {0x0000b0e0, 0x0341027e},
955 {0x0000b0e4, 0x035f0340},
956 {0x0000b0e8, 0x037f0360},
957 {0x0000b0ec, 0x04400441},
958 {0x0000b0f0, 0x0460045f},
959 {0x0000b0f4, 0x0541047f},
960 {0x0000b0f8, 0x055f0540},
961 {0x0000b0fc, 0x057f0560},
962 {0x0000b100, 0x06400641},
963 {0x0000b104, 0x0660065f},
964 {0x0000b108, 0x067e067f},
965 {0x0000b10c, 0x07410742},
966 {0x0000b110, 0x075f0740},
967 {0x0000b114, 0x077f0760},
968 {0x0000b118, 0x07800781},
969 {0x0000b11c, 0x07a0079f},
970 {0x0000b120, 0x07c107bf},
971 {0x0000b124, 0x000007c0},
972 {0x0000b128, 0x00000000},
973 {0x0000b12c, 0x00000000},
974 {0x0000b130, 0x00000000},
975 {0x0000b134, 0x00000000},
976 {0x0000b138, 0x00000000},
977 {0x0000b13c, 0x00000000},
978 {0x0000b140, 0x003f0020},
979 {0x0000b144, 0x00400041},
980 {0x0000b148, 0x0140005f},
981 {0x0000b14c, 0x0160015f},
982 {0x0000b150, 0x017e017f},
983 {0x0000b154, 0x02410242},
984 {0x0000b158, 0x025f0240},
985 {0x0000b15c, 0x027f0260},
986 {0x0000b160, 0x0341027e},
987 {0x0000b164, 0x035f0340},
988 {0x0000b168, 0x037f0360},
989 {0x0000b16c, 0x04400441},
990 {0x0000b170, 0x0460045f},
991 {0x0000b174, 0x0541047f},
992 {0x0000b178, 0x055f0540},
993 {0x0000b17c, 0x057f0560},
994 {0x0000b180, 0x06400641},
995 {0x0000b184, 0x0660065f},
996 {0x0000b188, 0x067e067f},
997 {0x0000b18c, 0x07410742},
998 {0x0000b190, 0x075f0740},
999 {0x0000b194, 0x077f0760},
1000 {0x0000b198, 0x07800781},
1001 {0x0000b19c, 0x07a0079f},
1002 {0x0000b1a0, 0x07c107bf},
1003 {0x0000b1a4, 0x000007c0},
1004 {0x0000b1a8, 0x00000000},
1005 {0x0000b1ac, 0x00000000},
1006 {0x0000b1b0, 0x00000000},
1007 {0x0000b1b4, 0x00000000},
1008 {0x0000b1b8, 0x00000000},
1009 {0x0000b1bc, 0x00000000},
1010 {0x0000b1c0, 0x00000000},
1011 {0x0000b1c4, 0x00000000},
1012 {0x0000b1c8, 0x00000000},
1013 {0x0000b1cc, 0x00000000},
1014 {0x0000b1d0, 0x00000000},
1015 {0x0000b1d4, 0x00000000},
1016 {0x0000b1d8, 0x00000000},
1017 {0x0000b1dc, 0x00000000},
1018 {0x0000b1e0, 0x00000000},
1019 {0x0000b1e4, 0x00000000},
1020 {0x0000b1e8, 0x00000000},
1021 {0x0000b1ec, 0x00000000},
1022 {0x0000b1f0, 0x00000396},
1023 {0x0000b1f4, 0x00000396},
1024 {0x0000b1f8, 0x00000396},
1025 {0x0000b1fc, 0x00000196},
1026};
1027
/*
 * qca956x_1p0_xlna_only - initvals override table for QCA956x v1.0 boards
 * that use an external LNA only.
 *
 * Per the column comment below, each row is:
 *   { register address, 5G_HT20 value, 5G_HT40 value,
 *     2G_HT40 value, 2G_HT20 value }
 * and the driver writes the column matching the current channel mode.
 *
 * NOTE(review): these are vendor-supplied baseband/PHY calibration
 * constants (0x9820/0x9824/0x9828 and 0x9e00-range AGC registers, plus
 * the per-chain 0xae18/0xbe18 mirrors — presumably chain 1/2 copies of
 * the chain-0 registers; confirm against the QCA956x register map).
 * Do not reformat or "correct" individual values: they must be written
 * to hardware verbatim.
 */
1028static const u32 qca956x_1p0_xlna_only[][5] = {
 1029 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
 1030 {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
 1031 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac621f1, 0x5ac621f1},
 1032 {0x00009828, 0x06903081, 0x06903081, 0x07d43881, 0x07d43881},
 1033 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x03721720},
 1034 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000de, 0x6c4000da},
 1035 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec8ad2e},
 1036 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x317a6062, 0x317a5ae2},
 1037 {0x00009e18, 0x00000000, 0x00000000, 0x03c00000, 0x03c00000},
 1038 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003b2, 0x000003b2},
 1039 {0x00009fc0, 0x813e4788, 0x813e4788, 0x813e4789, 0x813e4789},
 1040 {0x0000ae18, 0x00000000, 0x00000000, 0x03c00000, 0x03c00000},
 1041 {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001b2, 0x000001b2},
 1042 {0x0000be18, 0x00000000, 0x00000000, 0x03c00000, 0x03c00000},
 1043 {0x0000be20, 0x000001b5, 0x000001b5, 0x000001b2, 0x000001b2},
 1044};
1045
1046#endif /* INITVALS_956X_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1a9fe0983a6b..0f8e9464e4ab 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -34,7 +34,7 @@ struct ath_vif;
34 34
35extern struct ieee80211_ops ath9k_ops; 35extern struct ieee80211_ops ath9k_ops;
36extern int ath9k_modparam_nohwcrypt; 36extern int ath9k_modparam_nohwcrypt;
37extern int led_blink; 37extern int ath9k_led_blink;
38extern bool is_ath9k_unloaded; 38extern bool is_ath9k_unloaded;
39extern int ath9k_use_chanctx; 39extern int ath9k_use_chanctx;
40 40
@@ -830,14 +830,9 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
830/* Wake on Wireless LAN */ 830/* Wake on Wireless LAN */
831/************************/ 831/************************/
832 832
833struct ath9k_wow_pattern {
834 u8 pattern_bytes[MAX_PATTERN_SIZE];
835 u8 mask_bytes[MAX_PATTERN_SIZE];
836 u32 pattern_len;
837};
838
839#ifdef CONFIG_ATH9K_WOW 833#ifdef CONFIG_ATH9K_WOW
840void ath9k_init_wow(struct ieee80211_hw *hw); 834void ath9k_init_wow(struct ieee80211_hw *hw);
835void ath9k_deinit_wow(struct ieee80211_hw *hw);
841int ath9k_suspend(struct ieee80211_hw *hw, 836int ath9k_suspend(struct ieee80211_hw *hw,
842 struct cfg80211_wowlan *wowlan); 837 struct cfg80211_wowlan *wowlan);
843int ath9k_resume(struct ieee80211_hw *hw); 838int ath9k_resume(struct ieee80211_hw *hw);
@@ -846,6 +841,9 @@ void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled);
846static inline void ath9k_init_wow(struct ieee80211_hw *hw) 841static inline void ath9k_init_wow(struct ieee80211_hw *hw)
847{ 842{
848} 843}
844static inline void ath9k_deinit_wow(struct ieee80211_hw *hw)
845{
846}
849static inline int ath9k_suspend(struct ieee80211_hw *hw, 847static inline int ath9k_suspend(struct ieee80211_hw *hw,
850 struct cfg80211_wowlan *wowlan) 848 struct cfg80211_wowlan *wowlan)
851{ 849{
@@ -1039,9 +1037,8 @@ struct ath_softc {
1039 s16 tx99_power; 1037 s16 tx99_power;
1040 1038
1041#ifdef CONFIG_ATH9K_WOW 1039#ifdef CONFIG_ATH9K_WOW
1042 atomic_t wow_got_bmiss_intr;
1043 atomic_t wow_sleep_proc_intr; /* in the middle of WoW sleep ? */
1044 u32 wow_intr_before_sleep; 1040 u32 wow_intr_before_sleep;
1041 bool force_wow;
1045#endif 1042#endif
1046}; 1043};
1047 1044
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index ec93ddf0863a..5cee231cca1f 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -582,7 +582,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
582 582
583void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv) 583void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
584{ 584{
585 if (config_enabled(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) { 585 if (config_enabled(CONFIG_ATH9K_DEBUGFS)) {
586 relay_close(spec_priv->rfs_chan_spec_scan); 586 relay_close(spec_priv->rfs_chan_spec_scan);
587 spec_priv->rfs_chan_spec_scan = NULL; 587 spec_priv->rfs_chan_spec_scan = NULL;
588 } 588 }
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 871e969409bf..50a2e0ac3b8b 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -403,7 +403,8 @@ static const struct file_operations fops_antenna_diversity = {
403 403
404static int read_file_dma(struct seq_file *file, void *data) 404static int read_file_dma(struct seq_file *file, void *data)
405{ 405{
406 struct ath_softc *sc = file->private; 406 struct ieee80211_hw *hw = dev_get_drvdata(file->private);
407 struct ath_softc *sc = hw->priv;
407 struct ath_hw *ah = sc->sc_ah; 408 struct ath_hw *ah = sc->sc_ah;
408 u32 val[ATH9K_NUM_DMA_DEBUG_REGS]; 409 u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
409 int i, qcuOffset = 0, dcuOffset = 0; 410 int i, qcuOffset = 0, dcuOffset = 0;
@@ -470,20 +471,6 @@ static int read_file_dma(struct seq_file *file, void *data)
470 return 0; 471 return 0;
471} 472}
472 473
473static int open_file_dma(struct inode *inode, struct file *f)
474{
475 return single_open(f, read_file_dma, inode->i_private);
476}
477
478static const struct file_operations fops_dma = {
479 .open = open_file_dma,
480 .read = seq_read,
481 .owner = THIS_MODULE,
482 .llseek = seq_lseek,
483 .release = single_release,
484};
485
486
487void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status) 474void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
488{ 475{
489 if (status) 476 if (status)
@@ -539,7 +526,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
539 526
540static int read_file_interrupt(struct seq_file *file, void *data) 527static int read_file_interrupt(struct seq_file *file, void *data)
541{ 528{
542 struct ath_softc *sc = file->private; 529 struct ieee80211_hw *hw = dev_get_drvdata(file->private);
530 struct ath_softc *sc = hw->priv;
543 531
544#define PR_IS(a, s) \ 532#define PR_IS(a, s) \
545 do { \ 533 do { \
@@ -600,22 +588,10 @@ static int read_file_interrupt(struct seq_file *file, void *data)
600 return 0; 588 return 0;
601} 589}
602 590
603static int open_file_interrupt(struct inode *inode, struct file *f)
604{
605 return single_open(f, read_file_interrupt, inode->i_private);
606}
607
608static const struct file_operations fops_interrupt = {
609 .read = seq_read,
610 .open = open_file_interrupt,
611 .owner = THIS_MODULE,
612 .llseek = seq_lseek,
613 .release = single_release,
614};
615
616static int read_file_xmit(struct seq_file *file, void *data) 591static int read_file_xmit(struct seq_file *file, void *data)
617{ 592{
618 struct ath_softc *sc = file->private; 593 struct ieee80211_hw *hw = dev_get_drvdata(file->private);
594 struct ath_softc *sc = hw->priv;
619 595
620 seq_printf(file, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO"); 596 seq_printf(file, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO");
621 597
@@ -661,7 +637,8 @@ static void print_queue(struct ath_softc *sc, struct ath_txq *txq,
661 637
662static int read_file_queues(struct seq_file *file, void *data) 638static int read_file_queues(struct seq_file *file, void *data)
663{ 639{
664 struct ath_softc *sc = file->private; 640 struct ieee80211_hw *hw = dev_get_drvdata(file->private);
641 struct ath_softc *sc = hw->priv;
665 struct ath_txq *txq; 642 struct ath_txq *txq;
666 int i; 643 int i;
667 static const char *qname[4] = { 644 static const char *qname[4] = {
@@ -682,7 +659,8 @@ static int read_file_queues(struct seq_file *file, void *data)
682 659
683static int read_file_misc(struct seq_file *file, void *data) 660static int read_file_misc(struct seq_file *file, void *data)
684{ 661{
685 struct ath_softc *sc = file->private; 662 struct ieee80211_hw *hw = dev_get_drvdata(file->private);
663 struct ath_softc *sc = hw->priv;
686 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 664 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
687 struct ath9k_vif_iter_data iter_data; 665 struct ath9k_vif_iter_data iter_data;
688 struct ath_chanctx *ctx; 666 struct ath_chanctx *ctx;
@@ -773,7 +751,8 @@ static int read_file_misc(struct seq_file *file, void *data)
773 751
774static int read_file_reset(struct seq_file *file, void *data) 752static int read_file_reset(struct seq_file *file, void *data)
775{ 753{
776 struct ath_softc *sc = file->private; 754 struct ieee80211_hw *hw = dev_get_drvdata(file->private);
755 struct ath_softc *sc = hw->priv;
777 static const char * const reset_cause[__RESET_TYPE_MAX] = { 756 static const char * const reset_cause[__RESET_TYPE_MAX] = {
778 [RESET_TYPE_BB_HANG] = "Baseband Hang", 757 [RESET_TYPE_BB_HANG] = "Baseband Hang",
779 [RESET_TYPE_BB_WATCHDOG] = "Baseband Watchdog", 758 [RESET_TYPE_BB_WATCHDOG] = "Baseband Watchdog",
@@ -837,58 +816,6 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
837 TX_STAT_INC(qnum, delim_underrun); 816 TX_STAT_INC(qnum, delim_underrun);
838} 817}
839 818
840static int open_file_xmit(struct inode *inode, struct file *f)
841{
842 return single_open(f, read_file_xmit, inode->i_private);
843}
844
845static const struct file_operations fops_xmit = {
846 .read = seq_read,
847 .open = open_file_xmit,
848 .owner = THIS_MODULE,
849 .llseek = seq_lseek,
850 .release = single_release,
851};
852
853static int open_file_queues(struct inode *inode, struct file *f)
854{
855 return single_open(f, read_file_queues, inode->i_private);
856}
857
858static const struct file_operations fops_queues = {
859 .read = seq_read,
860 .open = open_file_queues,
861 .owner = THIS_MODULE,
862 .llseek = seq_lseek,
863 .release = single_release,
864};
865
866static int open_file_misc(struct inode *inode, struct file *f)
867{
868 return single_open(f, read_file_misc, inode->i_private);
869}
870
871static const struct file_operations fops_misc = {
872 .read = seq_read,
873 .open = open_file_misc,
874 .owner = THIS_MODULE,
875 .llseek = seq_lseek,
876 .release = single_release,
877};
878
879static int open_file_reset(struct inode *inode, struct file *f)
880{
881 return single_open(f, read_file_reset, inode->i_private);
882}
883
884static const struct file_operations fops_reset = {
885 .read = seq_read,
886 .open = open_file_reset,
887 .owner = THIS_MODULE,
888 .llseek = seq_lseek,
889 .release = single_release,
890};
891
892void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) 819void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
893{ 820{
894 ath9k_cmn_debug_stat_rx(&sc->debug.stats.rxstats, rs); 821 ath9k_cmn_debug_stat_rx(&sc->debug.stats.rxstats, rs);
@@ -1018,7 +945,8 @@ static const struct file_operations fops_regdump = {
1018 945
1019static int read_file_dump_nfcal(struct seq_file *file, void *data) 946static int read_file_dump_nfcal(struct seq_file *file, void *data)
1020{ 947{
1021 struct ath_softc *sc = file->private; 948 struct ieee80211_hw *hw = dev_get_drvdata(file->private);
949 struct ath_softc *sc = hw->priv;
1022 struct ath_hw *ah = sc->sc_ah; 950 struct ath_hw *ah = sc->sc_ah;
1023 struct ath9k_nfcal_hist *h = sc->cur_chan->caldata.nfCalHist; 951 struct ath9k_nfcal_hist *h = sc->cur_chan->caldata.nfCalHist;
1024 struct ath_common *common = ath9k_hw_common(ah); 952 struct ath_common *common = ath9k_hw_common(ah);
@@ -1115,6 +1043,133 @@ static const struct file_operations fops_ackto = {
1115}; 1043};
1116#endif 1044#endif
1117 1045
1046#ifdef CONFIG_ATH9K_WOW
1047
1048static ssize_t read_file_wow(struct file *file, char __user *user_buf,
1049 size_t count, loff_t *ppos)
1050{
1051 struct ath_softc *sc = file->private_data;
1052 unsigned int len = 0, size = 32;
1053 ssize_t retval;
1054 char *buf;
1055
1056 buf = kzalloc(size, GFP_KERNEL);
1057 if (!buf)
1058 return -ENOMEM;
1059
1060 len += scnprintf(buf + len, size - len, "WOW: %s\n",
1061 sc->force_wow ? "ENABLED" : "DISABLED");
1062
1063 if (len > size)
1064 len = size;
1065
1066 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1067 kfree(buf);
1068
1069 return retval;
1070}
1071
1072static ssize_t write_file_wow(struct file *file, const char __user *user_buf,
1073 size_t count, loff_t *ppos)
1074{
1075 struct ath_softc *sc = file->private_data;
1076 unsigned long val;
1077 char buf[32];
1078 ssize_t len;
1079
1080 len = min(count, sizeof(buf) - 1);
1081 if (copy_from_user(buf, user_buf, len))
1082 return -EFAULT;
1083
1084 buf[len] = '\0';
1085 if (kstrtoul(buf, 0, &val))
1086 return -EINVAL;
1087
1088 if (val != 1)
1089 return -EINVAL;
1090
1091 if (!sc->force_wow) {
1092 sc->force_wow = true;
1093 ath9k_init_wow(sc->hw);
1094 }
1095
1096 return count;
1097}
1098
1099static const struct file_operations fops_wow = {
1100 .read = read_file_wow,
1101 .write = write_file_wow,
1102 .open = simple_open,
1103 .owner = THIS_MODULE,
1104 .llseek = default_llseek,
1105};
1106
1107#endif
1108
1109static ssize_t read_file_tpc(struct file *file, char __user *user_buf,
1110 size_t count, loff_t *ppos)
1111{
1112 struct ath_softc *sc = file->private_data;
1113 struct ath_hw *ah = sc->sc_ah;
1114 unsigned int len = 0, size = 32;
1115 ssize_t retval;
1116 char *buf;
1117
1118 buf = kzalloc(size, GFP_KERNEL);
1119 if (!buf)
1120 return -ENOMEM;
1121
1122 len += scnprintf(buf + len, size - len, "%s\n",
1123 ah->tpc_enabled ? "ENABLED" : "DISABLED");
1124
1125 if (len > size)
1126 len = size;
1127
1128 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1129 kfree(buf);
1130
1131 return retval;
1132}
1133
1134static ssize_t write_file_tpc(struct file *file, const char __user *user_buf,
1135 size_t count, loff_t *ppos)
1136{
1137 struct ath_softc *sc = file->private_data;
1138 struct ath_hw *ah = sc->sc_ah;
1139 unsigned long val;
1140 char buf[32];
1141 ssize_t len;
1142 bool tpc_enabled;
1143
1144 len = min(count, sizeof(buf) - 1);
1145 if (copy_from_user(buf, user_buf, len))
1146 return -EFAULT;
1147
1148 buf[len] = '\0';
1149 if (kstrtoul(buf, 0, &val))
1150 return -EINVAL;
1151
1152 if (val < 0 || val > 1)
1153 return -EINVAL;
1154
1155 tpc_enabled = !!val;
1156
1157 if (tpc_enabled != ah->tpc_enabled) {
1158 ah->tpc_enabled = tpc_enabled;
1159 ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
1160 }
1161
1162 return count;
1163}
1164
1165static const struct file_operations fops_tpc = {
1166 .read = read_file_tpc,
1167 .write = write_file_tpc,
1168 .open = simple_open,
1169 .owner = THIS_MODULE,
1170 .llseek = default_llseek,
1171};
1172
1118/* Ethtool support for get-stats */ 1173/* Ethtool support for get-stats */
1119 1174
1120#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO" 1175#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
@@ -1260,14 +1315,14 @@ int ath9k_init_debug(struct ath_hw *ah)
1260 ath9k_tx99_init_debug(sc); 1315 ath9k_tx99_init_debug(sc);
1261 ath9k_cmn_spectral_init_debug(&sc->spec_priv, sc->debug.debugfs_phy); 1316 ath9k_cmn_spectral_init_debug(&sc->spec_priv, sc->debug.debugfs_phy);
1262 1317
1263 debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc, 1318 debugfs_create_devm_seqfile(sc->dev, "dma", sc->debug.debugfs_phy,
1264 &fops_dma); 1319 read_file_dma);
1265 debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc, 1320 debugfs_create_devm_seqfile(sc->dev, "interrupt", sc->debug.debugfs_phy,
1266 &fops_interrupt); 1321 read_file_interrupt);
1267 debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, sc, 1322 debugfs_create_devm_seqfile(sc->dev, "xmit", sc->debug.debugfs_phy,
1268 &fops_xmit); 1323 read_file_xmit);
1269 debugfs_create_file("queues", S_IRUSR, sc->debug.debugfs_phy, sc, 1324 debugfs_create_devm_seqfile(sc->dev, "queues", sc->debug.debugfs_phy,
1270 &fops_queues); 1325 read_file_queues);
1271 debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, 1326 debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1272 &sc->tx.txq_max_pending[IEEE80211_AC_BK]); 1327 &sc->tx.txq_max_pending[IEEE80211_AC_BK]);
1273 debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, 1328 debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
@@ -1276,10 +1331,10 @@ int ath9k_init_debug(struct ath_hw *ah)
1276 &sc->tx.txq_max_pending[IEEE80211_AC_VI]); 1331 &sc->tx.txq_max_pending[IEEE80211_AC_VI]);
1277 debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, 1332 debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1278 &sc->tx.txq_max_pending[IEEE80211_AC_VO]); 1333 &sc->tx.txq_max_pending[IEEE80211_AC_VO]);
1279 debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, sc, 1334 debugfs_create_devm_seqfile(sc->dev, "misc", sc->debug.debugfs_phy,
1280 &fops_misc); 1335 read_file_misc);
1281 debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc, 1336 debugfs_create_devm_seqfile(sc->dev, "reset", sc->debug.debugfs_phy,
1282 &fops_reset); 1337 read_file_reset);
1283 1338
1284 ath9k_cmn_debug_recv(sc->debug.debugfs_phy, &sc->debug.stats.rxstats); 1339 ath9k_cmn_debug_recv(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
1285 ath9k_cmn_debug_phy_err(sc->debug.debugfs_phy, &sc->debug.stats.rxstats); 1340 ath9k_cmn_debug_phy_err(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
@@ -1301,8 +1356,9 @@ int ath9k_init_debug(struct ath_hw *ah)
1301 &ah->config.cwm_ignore_extcca); 1356 &ah->config.cwm_ignore_extcca);
1302 debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy, sc, 1357 debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy, sc,
1303 &fops_regdump); 1358 &fops_regdump);
1304 debugfs_create_file("dump_nfcal", S_IRUSR, sc->debug.debugfs_phy, sc, 1359 debugfs_create_devm_seqfile(sc->dev, "dump_nfcal",
1305 &fops_dump_nfcal); 1360 sc->debug.debugfs_phy,
1361 read_file_dump_nfcal);
1306 1362
1307 ath9k_cmn_debug_base_eeprom(sc->debug.debugfs_phy, sc->sc_ah); 1363 ath9k_cmn_debug_base_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
1308 ath9k_cmn_debug_modal_eeprom(sc->debug.debugfs_phy, sc->sc_ah); 1364 ath9k_cmn_debug_modal_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
@@ -1320,10 +1376,17 @@ int ath9k_init_debug(struct ath_hw *ah)
1320 &fops_btcoex); 1376 &fops_btcoex);
1321#endif 1377#endif
1322 1378
1379#ifdef CONFIG_ATH9K_WOW
1380 debugfs_create_file("wow", S_IRUSR | S_IWUSR,
1381 sc->debug.debugfs_phy, sc, &fops_wow);
1382#endif
1383
1323#ifdef CONFIG_ATH9K_DYNACK 1384#ifdef CONFIG_ATH9K_DYNACK
1324 debugfs_create_file("ack_to", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, 1385 debugfs_create_file("ack_to", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1325 sc, &fops_ackto); 1386 sc, &fops_ackto);
1326#endif 1387#endif
1388 debugfs_create_file("tpc", S_IRUSR | S_IWUSR,
1389 sc->debug.debugfs_phy, sc, &fops_tpc);
1327 1390
1328 return 0; 1391 return 0;
1329} 1392}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 07b806c56c56..e5a78d4fd66e 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -748,6 +748,20 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
748 | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); 748 | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
749 } 749 }
750 750
751 /* TPC initializations */
752 if (ah->tpc_enabled) {
753 int ht40_delta;
754
755 ht40_delta = (IS_CHAN_HT40(chan)) ? ht40PowerIncForPdadc : 0;
756 ar5008_hw_init_rate_txpower(ah, ratesArray, chan, ht40_delta);
757 /* Enable TPC */
758 REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX,
759 MAX_RATE_POWER | AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE);
760 } else {
761 /* Disable TPC */
762 REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX, MAX_RATE_POWER);
763 }
764
751 REGWRITE_BUFFER_FLUSH(ah); 765 REGWRITE_BUFFER_FLUSH(ah);
752} 766}
753 767
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 5ba1385c9838..6ca33dfde1fd 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -886,6 +886,21 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
886 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) 886 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
887 | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); 887 | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
888 } 888 }
889
890 /* TPC initializations */
891 if (ah->tpc_enabled) {
892 int ht40_delta;
893
894 ht40_delta = (IS_CHAN_HT40(chan)) ? ht40PowerIncForPdadc : 0;
895 ar5008_hw_init_rate_txpower(ah, ratesArray, chan, ht40_delta);
896 /* Enable TPC */
897 REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX,
898 MAX_RATE_POWER | AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE);
899 } else {
900 /* Disable TPC */
901 REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX, MAX_RATE_POWER);
902 }
903
889 REGWRITE_BUFFER_FLUSH(ah); 904 REGWRITE_BUFFER_FLUSH(ah);
890} 905}
891 906
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 122b846b8ec0..098059039351 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -1332,6 +1332,20 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1332 ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6) 1332 ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
1333 | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0)); 1333 | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0));
1334 1334
1335 /* TPC initializations */
1336 if (ah->tpc_enabled) {
1337 int ht40_delta;
1338
1339 ht40_delta = (IS_CHAN_HT40(chan)) ? ht40PowerIncForPdadc : 0;
1340 ar5008_hw_init_rate_txpower(ah, ratesArray, chan, ht40_delta);
1341 /* Enable TPC */
1342 REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX,
1343 MAX_RATE_POWER | AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE);
1344 } else {
1345 /* Disable TPC */
1346 REG_WRITE(ah, AR_PHY_POWER_TX_RATE_MAX, MAX_RATE_POWER);
1347 }
1348
1335 REGWRITE_BUFFER_FLUSH(ah); 1349 REGWRITE_BUFFER_FLUSH(ah);
1336} 1350}
1337 1351
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 2fef7a480fec..da344b27326c 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -49,7 +49,7 @@ void ath_init_leds(struct ath_softc *sc)
49 if (AR_SREV_9100(sc->sc_ah)) 49 if (AR_SREV_9100(sc->sc_ah))
50 return; 50 return;
51 51
52 if (!led_blink) 52 if (!ath9k_led_blink)
53 sc->led_cdev.default_trigger = 53 sc->led_cdev.default_trigger =
54 ieee80211_get_radio_led_name(sc->hw); 54 ieee80211_get_radio_led_name(sc->hw);
55 55
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 9dde265d3f84..300d3671d0ef 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -44,6 +44,9 @@
44 44
45extern struct ieee80211_ops ath9k_htc_ops; 45extern struct ieee80211_ops ath9k_htc_ops;
46extern int htc_modparam_nohwcrypt; 46extern int htc_modparam_nohwcrypt;
47#ifdef CONFIG_MAC80211_LEDS
48extern int ath9k_htc_led_blink;
49#endif
47 50
48enum htc_phymode { 51enum htc_phymode {
49 HTC_MODE_11NA = 0, 52 HTC_MODE_11NA = 0,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 50f74a2a4cf8..2aabcbdaba4e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -279,6 +279,10 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv)
279 else 279 else
280 priv->ah->led_pin = ATH_LED_PIN_DEF; 280 priv->ah->led_pin = ATH_LED_PIN_DEF;
281 281
282 if (!ath9k_htc_led_blink)
283 priv->led_cdev.default_trigger =
284 ieee80211_get_radio_led_name(priv->hw);
285
282 ath9k_configure_leds(priv); 286 ath9k_configure_leds(priv);
283 287
284 snprintf(priv->led_name, sizeof(priv->led_name), 288 snprintf(priv->led_name, sizeof(priv->led_name),
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index e8fa9448da24..fd229409f676 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -39,6 +39,10 @@ module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
39MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave"); 39MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
40 40
41#ifdef CONFIG_MAC80211_LEDS 41#ifdef CONFIG_MAC80211_LEDS
42int ath9k_htc_led_blink = 1;
43module_param_named(blink, ath9k_htc_led_blink, int, 0444);
44MODULE_PARM_DESC(blink, "Enable LED blink on activity");
45
42static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = { 46static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = {
43 { .throughput = 0 * 1024, .blink_time = 334 }, 47 { .throughput = 0 * 1024, .blink_time = 334 },
44 { .throughput = 1 * 1024, .blink_time = 260 }, 48 { .throughput = 1 * 1024, .blink_time = 260 },
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index a0ff5b637054..d2408da38c1c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -351,11 +351,7 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
351 351
352 return; 352 return;
353ret: 353ret:
354 /* HTC-generated packets are freed here. */ 354 kfree_skb(skb);
355 if (htc_hdr && htc_hdr->endpoint_id != ENDPOINT0)
356 dev_kfree_skb_any(skb);
357 else
358 kfree_skb(skb);
359} 355}
360 356
361static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle, 357static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 6d4b273469b1..60aa8d71e753 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -246,6 +246,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
246 case AR9300_DEVID_AR953X: 246 case AR9300_DEVID_AR953X:
247 ah->hw_version.macVersion = AR_SREV_VERSION_9531; 247 ah->hw_version.macVersion = AR_SREV_VERSION_9531;
248 return; 248 return;
249 case AR9300_DEVID_QCA956X:
250 ah->hw_version.macVersion = AR_SREV_VERSION_9561;
249 } 251 }
250 252
251 val = REG_READ(ah, AR_SREV) & AR_SREV_ID; 253 val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
@@ -422,6 +424,8 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
422 ah->power_mode = ATH9K_PM_UNDEFINED; 424 ah->power_mode = ATH9K_PM_UNDEFINED;
423 ah->htc_reset_init = true; 425 ah->htc_reset_init = true;
424 426
427 ah->tpc_enabled = true;
428
425 ah->ani_function = ATH9K_ANI_ALL; 429 ah->ani_function = ATH9K_ANI_ALL;
426 if (!AR_SREV_9300_20_OR_LATER(ah)) 430 if (!AR_SREV_9300_20_OR_LATER(ah))
427 ah->ani_function &= ~ATH9K_ANI_MRC_CCK; 431 ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
@@ -536,6 +540,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
536 case AR_SREV_VERSION_9550: 540 case AR_SREV_VERSION_9550:
537 case AR_SREV_VERSION_9565: 541 case AR_SREV_VERSION_9565:
538 case AR_SREV_VERSION_9531: 542 case AR_SREV_VERSION_9531:
543 case AR_SREV_VERSION_9561:
539 break; 544 break;
540 default: 545 default:
541 ath_err(common, 546 ath_err(common,
@@ -636,6 +641,7 @@ int ath9k_hw_init(struct ath_hw *ah)
636 case AR9485_DEVID_AR1111: 641 case AR9485_DEVID_AR1111:
637 case AR9300_DEVID_AR9565: 642 case AR9300_DEVID_AR9565:
638 case AR9300_DEVID_AR953X: 643 case AR9300_DEVID_AR953X:
644 case AR9300_DEVID_QCA956X:
639 break; 645 break;
640 default: 646 default:
641 if (common->bus_ops->ath_bus_type == ATH_USB) 647 if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -776,7 +782,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
776 /* program BB PLL phase_shift */ 782 /* program BB PLL phase_shift */
777 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3, 783 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
778 AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1); 784 AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1);
779 } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) { 785 } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
786 AR_SREV_9561(ah)) {
780 u32 regval, pll2_divint, pll2_divfrac, refdiv; 787 u32 regval, pll2_divint, pll2_divfrac, refdiv;
781 788
782 REG_WRITE(ah, AR_RTC_PLL_CONTROL, 789 REG_WRITE(ah, AR_RTC_PLL_CONTROL,
@@ -787,7 +794,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
787 udelay(100); 794 udelay(100);
788 795
789 if (ah->is_clk_25mhz) { 796 if (ah->is_clk_25mhz) {
790 if (AR_SREV_9531(ah)) { 797 if (AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
791 pll2_divint = 0x1c; 798 pll2_divint = 0x1c;
792 pll2_divfrac = 0xa3d2; 799 pll2_divfrac = 0xa3d2;
793 refdiv = 1; 800 refdiv = 1;
@@ -803,14 +810,15 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
803 refdiv = 5; 810 refdiv = 5;
804 } else { 811 } else {
805 pll2_divint = 0x11; 812 pll2_divint = 0x11;
806 pll2_divfrac = 813 pll2_divfrac = (AR_SREV_9531(ah) ||
807 AR_SREV_9531(ah) ? 0x26665 : 0x26666; 814 AR_SREV_9561(ah)) ?
815 0x26665 : 0x26666;
808 refdiv = 1; 816 refdiv = 1;
809 } 817 }
810 } 818 }
811 819
812 regval = REG_READ(ah, AR_PHY_PLL_MODE); 820 regval = REG_READ(ah, AR_PHY_PLL_MODE);
813 if (AR_SREV_9531(ah)) 821 if (AR_SREV_9531(ah) || AR_SREV_9561(ah))
814 regval |= (0x1 << 22); 822 regval |= (0x1 << 22);
815 else 823 else
816 regval |= (0x1 << 16); 824 regval |= (0x1 << 16);
@@ -828,14 +836,16 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
828 (0x1 << 13) | 836 (0x1 << 13) |
829 (0x4 << 26) | 837 (0x4 << 26) |
830 (0x18 << 19); 838 (0x18 << 19);
831 else if (AR_SREV_9531(ah)) 839 else if (AR_SREV_9531(ah) || AR_SREV_9561(ah)) {
832 regval = (regval & 0x01c00fff) | 840 regval = (regval & 0x01c00fff) |
833 (0x1 << 31) | 841 (0x1 << 31) |
834 (0x2 << 29) | 842 (0x2 << 29) |
835 (0xa << 25) | 843 (0xa << 25) |
836 (0x1 << 19) | 844 (0x1 << 19);
837 (0x6 << 12); 845
838 else 846 if (AR_SREV_9531(ah))
847 regval |= (0x6 << 12);
848 } else
839 regval = (regval & 0x80071fff) | 849 regval = (regval & 0x80071fff) |
840 (0x3 << 30) | 850 (0x3 << 30) |
841 (0x1 << 13) | 851 (0x1 << 13) |
@@ -843,7 +853,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
843 (0x60 << 19); 853 (0x60 << 19);
844 REG_WRITE(ah, AR_PHY_PLL_MODE, regval); 854 REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
845 855
846 if (AR_SREV_9531(ah)) 856 if (AR_SREV_9531(ah) || AR_SREV_9561(ah))
847 REG_WRITE(ah, AR_PHY_PLL_MODE, 857 REG_WRITE(ah, AR_PHY_PLL_MODE,
848 REG_READ(ah, AR_PHY_PLL_MODE) & 0xffbfffff); 858 REG_READ(ah, AR_PHY_PLL_MODE) & 0xffbfffff);
849 else 859 else
@@ -882,7 +892,8 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
882 AR_IMR_RXORN | 892 AR_IMR_RXORN |
883 AR_IMR_BCNMISC; 893 AR_IMR_BCNMISC;
884 894
885 if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) 895 if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
896 AR_SREV_9561(ah))
886 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; 897 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
887 898
888 if (AR_SREV_9300_20_OR_LATER(ah)) { 899 if (AR_SREV_9300_20_OR_LATER(ah)) {
@@ -1671,7 +1682,8 @@ static void ath9k_hw_init_desc(struct ath_hw *ah)
1671 } 1682 }
1672#ifdef __BIG_ENDIAN 1683#ifdef __BIG_ENDIAN
1673 else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) || 1684 else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
1674 AR_SREV_9550(ah) || AR_SREV_9531(ah)) 1685 AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
1686 AR_SREV_9561(ah))
1675 REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0); 1687 REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
1676 else 1688 else
1677 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD); 1689 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
@@ -2459,7 +2471,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2459 2471
2460 if (AR_SREV_9300_20_OR_LATER(ah)) { 2472 if (AR_SREV_9300_20_OR_LATER(ah)) {
2461 pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK; 2473 pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
2462 if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah) && !AR_SREV_9565(ah)) 2474 if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah) &&
2475 !AR_SREV_9561(ah) && !AR_SREV_9565(ah))
2463 pCap->hw_caps |= ATH9K_HW_CAP_LDPC; 2476 pCap->hw_caps |= ATH9K_HW_CAP_LDPC;
2464 2477
2465 pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH; 2478 pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
@@ -2476,7 +2489,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2476 if (AR_SREV_9300_20_OR_LATER(ah)) 2489 if (AR_SREV_9300_20_OR_LATER(ah))
2477 pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED; 2490 pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
2478 2491
2479 if (AR_SREV_9300_20_OR_LATER(ah)) 2492 if (AR_SREV_9561(ah))
2493 ah->ent_mode = 0x3BDA000;
2494 else if (AR_SREV_9300_20_OR_LATER(ah))
2480 ah->ent_mode = REG_READ(ah, AR_ENT_OTP); 2495 ah->ent_mode = REG_READ(ah, AR_ENT_OTP);
2481 2496
2482 if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah)) 2497 if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
@@ -2529,13 +2544,17 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2529 pCap->hw_caps |= ATH9K_HW_CAP_RTT; 2544 pCap->hw_caps |= ATH9K_HW_CAP_RTT;
2530 } 2545 }
2531 2546
2532 if (AR_SREV_9462(ah))
2533 pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE;
2534
2535 if (AR_SREV_9300_20_OR_LATER(ah) && 2547 if (AR_SREV_9300_20_OR_LATER(ah) &&
2536 ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) 2548 ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
2537 pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; 2549 pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
2538 2550
2551#ifdef CONFIG_ATH9K_WOW
2552 if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565_11_OR_LATER(ah))
2553 ah->wow.max_patterns = MAX_NUM_PATTERN;
2554 else
2555 ah->wow.max_patterns = MAX_NUM_PATTERN_LEGACY;
2556#endif
2557
2539 return 0; 2558 return 0;
2540} 2559}
2541 2560
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 1cbd33551513..e82e570de330 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -54,6 +54,7 @@
54#define AR9485_DEVID_AR1111 0x0037 54#define AR9485_DEVID_AR1111 0x0037
55#define AR9300_DEVID_AR9565 0x0036 55#define AR9300_DEVID_AR9565 0x0036
56#define AR9300_DEVID_AR953X 0x003d 56#define AR9300_DEVID_AR953X 0x003d
57#define AR9300_DEVID_QCA956X 0x003f
57 58
58#define AR5416_AR9100_DEVID 0x000b 59#define AR5416_AR9100_DEVID 0x000b
59 60
@@ -198,12 +199,13 @@
198#define KAL_NUM_DESC_WORDS 12 199#define KAL_NUM_DESC_WORDS 12
199#define KAL_ANTENNA_MODE 1 200#define KAL_ANTENNA_MODE 1
200#define KAL_TO_DS 1 201#define KAL_TO_DS 1
201#define KAL_DELAY 4 /*delay of 4ms between 2 KAL frames */ 202#define KAL_DELAY 4 /* delay of 4ms between 2 KAL frames */
202#define KAL_TIMEOUT 900 203#define KAL_TIMEOUT 900
203 204
204#define MAX_PATTERN_SIZE 256 205#define MAX_PATTERN_SIZE 256
205#define MAX_PATTERN_MASK_SIZE 32 206#define MAX_PATTERN_MASK_SIZE 32
206#define MAX_NUM_PATTERN 8 207#define MAX_NUM_PATTERN 16
208#define MAX_NUM_PATTERN_LEGACY 8
207#define MAX_NUM_USER_PATTERN 6 /* deducting the disassociate and 209#define MAX_NUM_USER_PATTERN 6 /* deducting the disassociate and
208 deauthenticate packets */ 210 deauthenticate packets */
209 211
@@ -247,12 +249,10 @@ enum ath9k_hw_caps {
247#ifdef CONFIG_ATH9K_PCOEM 249#ifdef CONFIG_ATH9K_PCOEM
248 ATH9K_HW_CAP_RTT = BIT(14), 250 ATH9K_HW_CAP_RTT = BIT(14),
249 ATH9K_HW_CAP_MCI = BIT(15), 251 ATH9K_HW_CAP_MCI = BIT(15),
250 ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(16),
251 ATH9K_HW_CAP_BT_ANT_DIV = BIT(17), 252 ATH9K_HW_CAP_BT_ANT_DIV = BIT(17),
252#else 253#else
253 ATH9K_HW_CAP_RTT = 0, 254 ATH9K_HW_CAP_RTT = 0,
254 ATH9K_HW_CAP_MCI = 0, 255 ATH9K_HW_CAP_MCI = 0,
255 ATH9K_HW_WOW_DEVICE_CAPABLE = 0,
256 ATH9K_HW_CAP_BT_ANT_DIV = 0, 256 ATH9K_HW_CAP_BT_ANT_DIV = 0,
257#endif 257#endif
258 ATH9K_HW_CAP_DFS = BIT(18), 258 ATH9K_HW_CAP_DFS = BIT(18),
@@ -271,6 +271,12 @@ enum ath9k_hw_caps {
271 * of those types. 271 * of those types.
272 */ 272 */
273 273
274struct ath9k_hw_wow {
275 u32 wow_event_mask;
276 u32 wow_event_mask2;
277 u8 max_patterns;
278};
279
274struct ath9k_hw_capabilities { 280struct ath9k_hw_capabilities {
275 u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */ 281 u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */
276 u16 rts_aggr_limit; 282 u16 rts_aggr_limit;
@@ -929,7 +935,7 @@ struct ath_hw {
929 u32 ent_mode; 935 u32 ent_mode;
930 936
931#ifdef CONFIG_ATH9K_WOW 937#ifdef CONFIG_ATH9K_WOW
932 u32 wow_event_mask; 938 struct ath9k_hw_wow wow;
933#endif 939#endif
934 bool is_clk_25mhz; 940 bool is_clk_25mhz;
935 int (*get_mac_revision)(void); 941 int (*get_mac_revision)(void);
@@ -1086,6 +1092,8 @@ bool ar9003_is_paprd_enabled(struct ath_hw *ah);
1086void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx); 1092void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
1087void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array, 1093void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array,
1088 struct ath9k_channel *chan); 1094 struct ath9k_channel *chan);
1095void ar5008_hw_init_rate_txpower(struct ath_hw *ah, int16_t *rate_array,
1096 struct ath9k_channel *chan, int ht40_delta);
1089 1097
1090/* Hardware family op attach helpers */ 1098/* Hardware family op attach helpers */
1091int ar5008_hw_attach_phy_ops(struct ath_hw *ah); 1099int ar5008_hw_attach_phy_ops(struct ath_hw *ah);
@@ -1145,23 +1153,19 @@ ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
1145 1153
1146 1154
1147#ifdef CONFIG_ATH9K_WOW 1155#ifdef CONFIG_ATH9K_WOW
1148const char *ath9k_hw_wow_event_to_string(u32 wow_event); 1156int ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
1149void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern, 1157 u8 *user_mask, int pattern_count,
1150 u8 *user_mask, int pattern_count, 1158 int pattern_len);
1151 int pattern_len);
1152u32 ath9k_hw_wow_wakeup(struct ath_hw *ah); 1159u32 ath9k_hw_wow_wakeup(struct ath_hw *ah);
1153void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable); 1160void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable);
1154#else 1161#else
1155static inline const char *ath9k_hw_wow_event_to_string(u32 wow_event) 1162static inline int ath9k_hw_wow_apply_pattern(struct ath_hw *ah,
1156{ 1163 u8 *user_pattern,
1157 return NULL; 1164 u8 *user_mask,
1158} 1165 int pattern_count,
1159static inline void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, 1166 int pattern_len)
1160 u8 *user_pattern,
1161 u8 *user_mask,
1162 int pattern_count,
1163 int pattern_len)
1164{ 1167{
1168 return 0;
1165} 1169}
1166static inline u32 ath9k_hw_wow_wakeup(struct ath_hw *ah) 1170static inline u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
1167{ 1171{
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index d1c39346b264..6c6e88495394 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -45,8 +45,8 @@ int ath9k_modparam_nohwcrypt;
45module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444); 45module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
46MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption"); 46MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
47 47
48int led_blink; 48int ath9k_led_blink;
49module_param_named(blink, led_blink, int, 0444); 49module_param_named(blink, ath9k_led_blink, int, 0444);
50MODULE_PARM_DESC(blink, "Enable LED blink on activity"); 50MODULE_PARM_DESC(blink, "Enable LED blink on activity");
51 51
52static int ath9k_btcoex_enable; 52static int ath9k_btcoex_enable;
@@ -996,6 +996,7 @@ void ath9k_deinit_device(struct ath_softc *sc)
996 ath9k_ps_restore(sc); 996 ath9k_ps_restore(sc);
997 997
998 ath9k_deinit_debug(sc); 998 ath9k_deinit_debug(sc);
999 ath9k_deinit_wow(hw);
999 ieee80211_unregister_hw(hw); 1000 ieee80211_unregister_hw(hw);
1000 ath_rx_cleanup(sc); 1001 ath_rx_cleanup(sc);
1001 ath9k_deinit_softc(sc); 1002 ath9k_deinit_softc(sc);
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index b829263e3d0a..90631d768a60 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -516,14 +516,14 @@ int ath_update_survey_stats(struct ath_softc *sc)
516 ath_hw_cycle_counters_update(common); 516 ath_hw_cycle_counters_update(common);
517 517
518 if (cc->cycles > 0) { 518 if (cc->cycles > 0) {
519 survey->filled |= SURVEY_INFO_CHANNEL_TIME | 519 survey->filled |= SURVEY_INFO_TIME |
520 SURVEY_INFO_CHANNEL_TIME_BUSY | 520 SURVEY_INFO_TIME_BUSY |
521 SURVEY_INFO_CHANNEL_TIME_RX | 521 SURVEY_INFO_TIME_RX |
522 SURVEY_INFO_CHANNEL_TIME_TX; 522 SURVEY_INFO_TIME_TX;
523 survey->channel_time += cc->cycles / div; 523 survey->time += cc->cycles / div;
524 survey->channel_time_busy += cc->rx_busy / div; 524 survey->time_busy += cc->rx_busy / div;
525 survey->channel_time_rx += cc->rx_frame / div; 525 survey->time_rx += cc->rx_frame / div;
526 survey->channel_time_tx += cc->tx_frame / div; 526 survey->time_tx += cc->tx_frame / div;
527 } 527 }
528 528
529 if (cc->cycles < div) 529 if (cc->cycles < div)
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 3e58bfa0c1fd..bba85d1a6cd1 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -820,7 +820,8 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
820 return; 820 return;
821 } 821 }
822 822
823 if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) 823 if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
824 AR_SREV_9561(ah))
824 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; 825 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
825 826
826 async_mask = AR_INTR_MAC_IRQ; 827 async_mask = AR_INTR_MAC_IRQ;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 62b0bf4fdf6b..9ede991b8d76 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -555,15 +555,6 @@ irqreturn_t ath_isr(int irq, void *dev)
555 (status & ATH9K_INT_BB_WATCHDOG)) 555 (status & ATH9K_INT_BB_WATCHDOG))
556 goto chip_reset; 556 goto chip_reset;
557 557
558#ifdef CONFIG_ATH9K_WOW
559 if (status & ATH9K_INT_BMISS) {
560 if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
561 atomic_inc(&sc->wow_got_bmiss_intr);
562 atomic_dec(&sc->wow_sleep_proc_intr);
563 }
564 }
565#endif
566
567 if (status & ATH9K_INT_SWBA) 558 if (status & ATH9K_INT_SWBA)
568 tasklet_schedule(&sc->bcon_tasklet); 559 tasklet_schedule(&sc->bcon_tasklet);
569 560
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index f009b5b57e5e..e6fef1be9977 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -427,6 +427,11 @@ static const struct pci_device_id ath_pci_id_table[] = {
427 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 427 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
428 0x0036, 428 0x0036,
429 0x11AD, /* LITEON */ 429 0x11AD, /* LITEON */
430 0x1842),
431 .driver_data = ATH9K_PCI_AR9565_1ANT },
432 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
433 0x0036,
434 0x11AD, /* LITEON */
430 0x6671), 435 0x6671),
431 .driver_data = ATH9K_PCI_AR9565_1ANT }, 436 .driver_data = ATH9K_PCI_AR9565_1ANT },
432 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 437 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
@@ -446,9 +451,19 @@ static const struct pci_device_id ath_pci_id_table[] = {
446 .driver_data = ATH9K_PCI_AR9565_1ANT }, 451 .driver_data = ATH9K_PCI_AR9565_1ANT },
447 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 452 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
448 0x0036, 453 0x0036,
454 0x1B9A, /* XAVI */
455 0x28A3),
456 .driver_data = ATH9K_PCI_AR9565_1ANT },
457 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
458 0x0036,
449 PCI_VENDOR_ID_AZWAVE, 459 PCI_VENDOR_ID_AZWAVE,
450 0x218A), 460 0x218A),
451 .driver_data = ATH9K_PCI_AR9565_1ANT }, 461 .driver_data = ATH9K_PCI_AR9565_1ANT },
462 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
463 0x0036,
464 PCI_VENDOR_ID_AZWAVE,
465 0x2F8A),
466 .driver_data = ATH9K_PCI_AR9565_1ANT },
452 467
453 /* WB335 1-ANT / Antenna Diversity */ 468 /* WB335 1-ANT / Antenna Diversity */
454 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 469 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
@@ -508,6 +523,11 @@ static const struct pci_device_id ath_pci_id_table[] = {
508 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV }, 523 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
509 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 524 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
510 0x0036, 525 0x0036,
526 PCI_VENDOR_ID_AZWAVE,
527 0x213C),
528 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
529 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
530 0x0036,
511 PCI_VENDOR_ID_HP, 531 PCI_VENDOR_ID_HP,
512 0x18E3), 532 0x18E3),
513 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV }, 533 .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -555,6 +575,16 @@ static const struct pci_device_id ath_pci_id_table[] = {
555 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV }, 575 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
556 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 576 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
557 0x0036, 577 0x0036,
578 PCI_VENDOR_ID_SAMSUNG,
579 0x4129),
580 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
581 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
582 0x0036,
583 PCI_VENDOR_ID_SAMSUNG,
584 0x412A),
585 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
586 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
587 0x0036,
558 PCI_VENDOR_ID_ATHEROS, 588 PCI_VENDOR_ID_ATHEROS,
559 0x3027), 589 0x3027),
560 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV }, 590 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -586,10 +616,25 @@ static const struct pci_device_id ath_pci_id_table[] = {
586 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 616 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
587 0x0036, 617 0x0036,
588 0x11AD, /* LITEON */ 618 0x11AD, /* LITEON */
619 0x1832),
620 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
621 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
622 0x0036,
623 0x11AD, /* LITEON */
589 0x0692), 624 0x0692),
590 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV }, 625 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
591 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 626 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
592 0x0036, 627 0x0036,
628 0x11AD, /* LITEON */
629 0x0803),
630 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
631 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
632 0x0036,
633 0x11AD, /* LITEON */
634 0x0813),
635 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
636 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
637 0x0036,
593 PCI_VENDOR_ID_AZWAVE, 638 PCI_VENDOR_ID_AZWAVE,
594 0x2130), 639 0x2130),
595 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV }, 640 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -605,6 +650,21 @@ static const struct pci_device_id ath_pci_id_table[] = {
605 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV }, 650 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
606 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 651 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
607 0x0036, 652 0x0036,
653 PCI_VENDOR_ID_AZWAVE,
654 0x218B),
655 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
656 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
657 0x0036,
658 PCI_VENDOR_ID_AZWAVE,
659 0x218C),
660 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
661 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
662 0x0036,
663 PCI_VENDOR_ID_AZWAVE,
664 0x2F82),
665 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
666 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
667 0x0036,
608 0x144F, /* ASKEY */ 668 0x144F, /* ASKEY */
609 0x7202), 669 0x7202),
610 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV }, 670 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -616,10 +676,20 @@ static const struct pci_device_id ath_pci_id_table[] = {
616 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 676 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
617 0x0036, 677 0x0036,
618 0x1B9A, /* XAVI */ 678 0x1B9A, /* XAVI */
679 0x2813),
680 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
681 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
682 0x0036,
683 0x1B9A, /* XAVI */
619 0x28A2), 684 0x28A2),
620 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV }, 685 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
621 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 686 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
622 0x0036, 687 0x0036,
688 0x1B9A, /* XAVI */
689 0x28A4),
690 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
691 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
692 0x0036,
623 0x185F, /* WNC */ 693 0x185F, /* WNC */
624 0x3027), 694 0x3027),
625 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV }, 695 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -636,10 +706,25 @@ static const struct pci_device_id ath_pci_id_table[] = {
636 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 706 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
637 0x0036, 707 0x0036,
638 PCI_VENDOR_ID_FOXCONN, 708 PCI_VENDOR_ID_FOXCONN,
709 0xE08F),
710 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
711 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
712 0x0036,
713 PCI_VENDOR_ID_FOXCONN,
639 0xE081), 714 0xE081),
640 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV }, 715 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
641 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, 716 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
642 0x0036, 717 0x0036,
718 PCI_VENDOR_ID_FOXCONN,
719 0xE091),
720 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
721 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
722 0x0036,
723 PCI_VENDOR_ID_FOXCONN,
724 0xE099),
725 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
726 { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
727 0x0036,
643 PCI_VENDOR_ID_LENOVO, 728 PCI_VENDOR_ID_LENOVO,
644 0x3026), 729 0x3026),
645 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV }, 730 .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
@@ -913,9 +998,12 @@ static int ath_pci_suspend(struct device *device)
913 struct pci_dev *pdev = to_pci_dev(device); 998 struct pci_dev *pdev = to_pci_dev(device);
914 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 999 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
915 struct ath_softc *sc = hw->priv; 1000 struct ath_softc *sc = hw->priv;
1001 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
916 1002
917 if (sc->wow_enabled) 1003 if (test_bit(ATH_OP_WOW_ENABLED, &common->op_flags)) {
1004 dev_info(&pdev->dev, "WOW is enabled, bypassing PCI suspend\n");
918 return 0; 1005 return 0;
1006 }
919 1007
920 /* The device has to be moved to FULLSLEEP forcibly. 1008 /* The device has to be moved to FULLSLEEP forcibly.
921 * Otherwise the chip never moved to full sleep, 1009 * Otherwise the chip never moved to full sleep,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 7395afbc5124..6fb40ef86fd6 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -425,7 +425,8 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
425 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; 425 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
426 } 426 }
427 427
428 if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah)) 428 if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah) ||
429 AR_SREV_9561(sc->sc_ah))
429 rfilt |= ATH9K_RX_FILTER_4ADDRESS; 430 rfilt |= ATH9K_RX_FILTER_4ADDRESS;
430 431
431 if (ath9k_is_chanctx_enabled() && 432 if (ath9k_is_chanctx_enabled() &&
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index fb11a9172f38..9587ec655680 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -814,6 +814,7 @@
814#define AR_SREV_REVISION_9531_10 0 814#define AR_SREV_REVISION_9531_10 0
815#define AR_SREV_REVISION_9531_11 1 815#define AR_SREV_REVISION_9531_11 1
816#define AR_SREV_REVISION_9531_20 2 816#define AR_SREV_REVISION_9531_20 2
817#define AR_SREV_VERSION_9561 0x600
817 818
818#define AR_SREV_5416(_ah) \ 819#define AR_SREV_5416(_ah) \
819 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \ 820 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -899,10 +900,13 @@
899 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485)) 900 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
900#define AR_SREV_9565(_ah) \ 901#define AR_SREV_9565(_ah) \
901 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565)) 902 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
903#define AR_SREV_9003_PCOEM(_ah) \
904 (AR_SREV_9462(_ah) || AR_SREV_9485(_ah) || AR_SREV_9565(_ah))
902#else 905#else
903#define AR_SREV_9462(_ah) 0 906#define AR_SREV_9462(_ah) 0
904#define AR_SREV_9485(_ah) 0 907#define AR_SREV_9485(_ah) 0
905#define AR_SREV_9565(_ah) 0 908#define AR_SREV_9565(_ah) 0
909#define AR_SREV_9003_PCOEM(_ah) 0
906#endif 910#endif
907 911
908#define AR_SREV_9485_11_OR_LATER(_ah) \ 912#define AR_SREV_9485_11_OR_LATER(_ah) \
@@ -974,6 +978,9 @@
974 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \ 978 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \
975 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_20)) 979 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_20))
976 980
981#define AR_SREV_9561(_ah) \
982 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561))
983
977/* NOTE: When adding chips newer than Peacock, add chip check here */ 984/* NOTE: When adding chips newer than Peacock, add chip check here */
978#define AR_SREV_9580_10_OR_LATER(_ah) \ 985#define AR_SREV_9580_10_OR_LATER(_ah) \
979 (AR_SREV_9580(_ah)) 986 (AR_SREV_9580(_ah))
@@ -1876,6 +1883,7 @@ enum {
1876#define AR_FIRST_NDP_TIMER 7 1883#define AR_FIRST_NDP_TIMER 7
1877#define AR_NDP2_PERIOD 0x81a0 1884#define AR_NDP2_PERIOD 0x81a0
1878#define AR_NDP2_TIMER_MODE 0x81c0 1885#define AR_NDP2_TIMER_MODE 0x81c0
1886#define AR_GEN_TIMERS2_MODE_ENABLE_MASK 0x000000FF
1879 1887
1880#define AR_GEN_TIMERS(_i) (0x8200 + ((_i) << 2)) 1888#define AR_GEN_TIMERS(_i) (0x8200 + ((_i) << 2))
1881#define AR_NEXT_TBTT_TIMER AR_GEN_TIMERS(0) 1889#define AR_NEXT_TBTT_TIMER AR_GEN_TIMERS(0)
@@ -1971,6 +1979,7 @@ enum {
1971 1979
1972#define AR_DIRECT_CONNECT 0x83a0 1980#define AR_DIRECT_CONNECT 0x83a0
1973#define AR_DC_AP_STA_EN 0x00000001 1981#define AR_DC_AP_STA_EN 0x00000001
1982#define AR_DC_TSF2_ENABLE 0x00000001
1974 1983
1975#define AR_AES_MUTE_MASK0 0x805c 1984#define AR_AES_MUTE_MASK0 0x805c
1976#define AR_AES_MUTE_MASK0_FC 0x0000FFFF 1985#define AR_AES_MUTE_MASK0_FC 0x0000FFFF
@@ -2003,126 +2012,6 @@ enum {
2003 2012
2004#define AR_WOW_BEACON_TIMO_MAX 0xffffffff 2013#define AR_WOW_BEACON_TIMO_MAX 0xffffffff
2005 2014
2006/*
2007 * MAC WoW Registers
2008 */
2009
2010#define AR_WOW_PATTERN 0x825C
2011#define AR_WOW_COUNT 0x8260
2012#define AR_WOW_BCN_EN 0x8270
2013#define AR_WOW_BCN_TIMO 0x8274
2014#define AR_WOW_KEEP_ALIVE_TIMO 0x8278
2015#define AR_WOW_KEEP_ALIVE 0x827c
2016#define AR_WOW_US_SCALAR 0x8284
2017#define AR_WOW_KEEP_ALIVE_DELAY 0x8288
2018#define AR_WOW_PATTERN_MATCH 0x828c
2019#define AR_WOW_PATTERN_OFF1 0x8290 /* pattern bytes 0 -> 3 */
2020#define AR_WOW_PATTERN_OFF2 0x8294 /* pattern bytes 4 -> 7 */
2021
2022/* for AR9285 or later version of chips */
2023#define AR_WOW_EXACT 0x829c
2024#define AR_WOW_LENGTH1 0x8360
2025#define AR_WOW_LENGTH2 0X8364
2026/* register to enable match for less than 256 bytes packets */
2027#define AR_WOW_PATTERN_MATCH_LT_256B 0x8368
2028
2029#define AR_SW_WOW_CONTROL 0x20018
2030#define AR_SW_WOW_ENABLE 0x1
2031#define AR_SWITCH_TO_REFCLK 0x2
2032#define AR_RESET_CONTROL 0x4
2033#define AR_RESET_VALUE_MASK 0x8
2034#define AR_HW_WOW_DISABLE 0x10
2035#define AR_CLR_MAC_INTERRUPT 0x20
2036#define AR_CLR_KA_INTERRUPT 0x40
2037
2038/* AR_WOW_PATTERN register values */
2039#define AR_WOW_BACK_OFF_SHIFT(x) ((x & 0xf) << 28) /* in usecs */
2040#define AR_WOW_MAC_INTR_EN 0x00040000
2041#define AR_WOW_MAGIC_EN 0x00010000
2042#define AR_WOW_PATTERN_EN(x) (x & 0xff)
2043#define AR_WOW_PAT_FOUND_SHIFT 8
2044#define AR_WOW_PATTERN_FOUND(x) (x & (0xff << AR_WOW_PAT_FOUND_SHIFT))
2045#define AR_WOW_PATTERN_FOUND_MASK ((0xff) << AR_WOW_PAT_FOUND_SHIFT)
2046#define AR_WOW_MAGIC_PAT_FOUND 0x00020000
2047#define AR_WOW_MAC_INTR 0x00080000
2048#define AR_WOW_KEEP_ALIVE_FAIL 0x00100000
2049#define AR_WOW_BEACON_FAIL 0x00200000
2050
2051#define AR_WOW_STATUS(x) (x & (AR_WOW_PATTERN_FOUND_MASK | \
2052 AR_WOW_MAGIC_PAT_FOUND | \
2053 AR_WOW_KEEP_ALIVE_FAIL | \
2054 AR_WOW_BEACON_FAIL))
2055#define AR_WOW_CLEAR_EVENTS(x) (x & ~(AR_WOW_PATTERN_EN(0xff) | \
2056 AR_WOW_MAGIC_EN | \
2057 AR_WOW_MAC_INTR_EN | \
2058 AR_WOW_BEACON_FAIL | \
2059 AR_WOW_KEEP_ALIVE_FAIL))
2060
2061/* AR_WOW_COUNT register values */
2062#define AR_WOW_AIFS_CNT(x) (x & 0xff)
2063#define AR_WOW_SLOT_CNT(x) ((x & 0xff) << 8)
2064#define AR_WOW_KEEP_ALIVE_CNT(x) ((x & 0xff) << 16)
2065
2066/* AR_WOW_BCN_EN register */
2067#define AR_WOW_BEACON_FAIL_EN 0x00000001
2068
2069/* AR_WOW_BCN_TIMO rgister */
2070#define AR_WOW_BEACON_TIMO 0x40000000 /* valid if BCN_EN is set */
2071
2072/* AR_WOW_KEEP_ALIVE_TIMO register */
2073#define AR_WOW_KEEP_ALIVE_TIMO_VALUE
2074#define AR_WOW_KEEP_ALIVE_NEVER 0xffffffff
2075
2076/* AR_WOW_KEEP_ALIVE register */
2077#define AR_WOW_KEEP_ALIVE_AUTO_DIS 0x00000001
2078#define AR_WOW_KEEP_ALIVE_FAIL_DIS 0x00000002
2079
2080/* AR_WOW_KEEP_ALIVE_DELAY register */
2081#define AR_WOW_KEEP_ALIVE_DELAY_VALUE 0x000003e8 /* 1 msec */
2082
2083
2084/*
2085 * keep it long for beacon workaround - ensure no false alarm
2086 */
2087#define AR_WOW_BMISSTHRESHOLD 0x20
2088
2089/* AR_WOW_PATTERN_MATCH register */
2090#define AR_WOW_PAT_END_OF_PKT(x) (x & 0xf)
2091#define AR_WOW_PAT_OFF_MATCH(x) ((x & 0xf) << 8)
2092
2093/*
2094 * default values for Wow Configuration for backoff, aifs, slot, keep-alive
2095 * to be programmed into various registers.
2096 */
2097#define AR_WOW_PAT_BACKOFF 0x00000004 /* AR_WOW_PATTERN_REG */
2098#define AR_WOW_CNT_AIFS_CNT 0x00000022 /* AR_WOW_COUNT_REG */
2099#define AR_WOW_CNT_SLOT_CNT 0x00000009 /* AR_WOW_COUNT_REG */
2100/*
2101 * Keepalive count applicable for AR9280 2.0 and above.
2102 */
2103#define AR_WOW_CNT_KA_CNT 0x00000008 /* AR_WOW_COUNT register */
2104
2105/* WoW - Transmit buffer for keep alive frames */
2106#define AR_WOW_TRANSMIT_BUFFER 0xe000 /* E000 - EFFC */
2107
2108#define AR_WOW_TXBUF(i) (AR_WOW_TRANSMIT_BUFFER + ((i) << 2))
2109
2110#define AR_WOW_KA_DESC_WORD2 0xe000
2111
2112#define AR_WOW_KA_DATA_WORD0 0xe030
2113
2114/* WoW Transmit Buffer for patterns */
2115#define AR_WOW_TB_PATTERN(i) (0xe100 + (i << 8))
2116#define AR_WOW_TB_MASK(i) (0xec00 + (i << 5))
2117
2118/* Currently Pattern 0-7 are supported - so bit 0-7 are set */
2119#define AR_WOW_PATTERN_SUPPORTED 0xff
2120#define AR_WOW_LENGTH_MAX 0xff
2121#define AR_WOW_LEN1_SHIFT(_i) ((0x3 - ((_i) & 0x3)) << 0x3)
2122#define AR_WOW_LENGTH1_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN1_SHIFT(_i))
2123#define AR_WOW_LEN2_SHIFT(_i) ((0x7 - ((_i) & 0x7)) << 0x3)
2124#define AR_WOW_LENGTH2_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN2_SHIFT(_i))
2125
2126#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */ 2015#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */
2127#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */ 2016#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
2128 2017
diff --git a/drivers/net/wireless/ath/ath9k/reg_wow.h b/drivers/net/wireless/ath/ath9k/reg_wow.h
new file mode 100644
index 000000000000..3abfca56ca58
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/reg_wow.h
@@ -0,0 +1,128 @@
1/*
2 * Copyright (c) 2015 Qualcomm Atheros Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef REG_WOW_H
18#define REG_WOW_H
19
20#define AR_WOW_PATTERN 0x825C
21#define AR_WOW_COUNT 0x8260
22#define AR_WOW_BCN_EN 0x8270
23#define AR_WOW_BCN_TIMO 0x8274
24#define AR_WOW_KEEP_ALIVE_TIMO 0x8278
25#define AR_WOW_KEEP_ALIVE 0x827c
26#define AR_WOW_KEEP_ALIVE_DELAY 0x8288
27#define AR_WOW_PATTERN_MATCH 0x828c
28
29/*
30 * AR_WOW_LENGTH1
31 * bit 31:24 pattern 0 length
32 * bit 23:16 pattern 1 length
33 * bit 15:8 pattern 2 length
34 * bit 7:0 pattern 3 length
35 *
36 * AR_WOW_LENGTH2
37 * bit 31:24 pattern 4 length
38 * bit 23:16 pattern 5 length
39 * bit 15:8 pattern 6 length
40 * bit 7:0 pattern 7 length
41 *
42 * AR_WOW_LENGTH3
43 * bit 31:24 pattern 8 length
44 * bit 23:16 pattern 9 length
45 * bit 15:8 pattern 10 length
46 * bit 7:0 pattern 11 length
47 *
48 * AR_WOW_LENGTH4
49 * bit 31:24 pattern 12 length
50 * bit 23:16 pattern 13 length
51 * bit 15:8 pattern 14 length
52 * bit 7:0 pattern 15 length
53 */
54#define AR_WOW_LENGTH1 0x8360
55#define AR_WOW_LENGTH2 0X8364
56#define AR_WOW_LENGTH3 0X8380
57#define AR_WOW_LENGTH4 0X8384
58
59#define AR_WOW_PATTERN_MATCH_LT_256B 0x8368
60#define AR_MAC_PCU_WOW4 0x8370
61
62#define AR_SW_WOW_CONTROL 0x20018
63#define AR_SW_WOW_ENABLE 0x1
64#define AR_SWITCH_TO_REFCLK 0x2
65#define AR_RESET_CONTROL 0x4
66#define AR_RESET_VALUE_MASK 0x8
67#define AR_HW_WOW_DISABLE 0x10
68#define AR_CLR_MAC_INTERRUPT 0x20
69#define AR_CLR_KA_INTERRUPT 0x40
70
71#define AR_WOW_BACK_OFF_SHIFT(x) ((x & 0xf) << 27) /* in usecs */
72#define AR_WOW_MAC_INTR_EN 0x00040000
73#define AR_WOW_MAGIC_EN 0x00010000
74#define AR_WOW_PATTERN_EN(x) (x & 0xff)
75#define AR_WOW_PAT_FOUND_SHIFT 8
76#define AR_WOW_PATTERN_FOUND(x) (x & (0xff << AR_WOW_PAT_FOUND_SHIFT))
77#define AR_WOW_PATTERN_FOUND_MASK ((0xff) << AR_WOW_PAT_FOUND_SHIFT)
78#define AR_WOW_MAGIC_PAT_FOUND 0x00020000
79#define AR_WOW_MAC_INTR 0x00080000
80#define AR_WOW_KEEP_ALIVE_FAIL 0x00100000
81#define AR_WOW_BEACON_FAIL 0x00200000
82
83#define AR_WOW_STATUS(x) (x & (AR_WOW_PATTERN_FOUND_MASK | \
84 AR_WOW_MAGIC_PAT_FOUND | \
85 AR_WOW_KEEP_ALIVE_FAIL | \
86 AR_WOW_BEACON_FAIL))
87#define AR_WOW_CLEAR_EVENTS(x) (x & ~(AR_WOW_PATTERN_EN(0xff) | \
88 AR_WOW_MAGIC_EN | \
89 AR_WOW_MAC_INTR_EN | \
90 AR_WOW_BEACON_FAIL | \
91 AR_WOW_KEEP_ALIVE_FAIL))
92
93#define AR_WOW_AIFS_CNT(x) (x & 0xff)
94#define AR_WOW_SLOT_CNT(x) ((x & 0xff) << 8)
95#define AR_WOW_KEEP_ALIVE_CNT(x) ((x & 0xff) << 16)
96
97#define AR_WOW_BEACON_FAIL_EN 0x00000001
98#define AR_WOW_BEACON_TIMO 0x40000000
99#define AR_WOW_KEEP_ALIVE_NEVER 0xffffffff
100#define AR_WOW_KEEP_ALIVE_AUTO_DIS 0x00000001
101#define AR_WOW_KEEP_ALIVE_FAIL_DIS 0x00000002
102#define AR_WOW_KEEP_ALIVE_DELAY_VALUE 0x000003e8 /* 1 msec */
103#define AR_WOW_BMISSTHRESHOLD 0x20
104#define AR_WOW_PAT_END_OF_PKT(x) (x & 0xf)
105#define AR_WOW_PAT_OFF_MATCH(x) ((x & 0xf) << 8)
106#define AR_WOW_PAT_BACKOFF 0x00000004
107#define AR_WOW_CNT_AIFS_CNT 0x00000022
108#define AR_WOW_CNT_SLOT_CNT 0x00000009
109#define AR_WOW_CNT_KA_CNT 0x00000008
110
111#define AR_WOW_TRANSMIT_BUFFER 0xe000
112#define AR_WOW_TXBUF(i) (AR_WOW_TRANSMIT_BUFFER + ((i) << 2))
113#define AR_WOW_KA_DESC_WORD2 0xe000
114#define AR_WOW_TB_PATTERN(i) (0xe100 + (i << 8))
115#define AR_WOW_TB_MASK(i) (0xec00 + (i << 5))
116#define AR_WOW_PATTERN_SUPPORTED_LEGACY 0xff
117#define AR_WOW_PATTERN_SUPPORTED 0xffff
118#define AR_WOW_LENGTH_MAX 0xff
119#define AR_WOW_LEN1_SHIFT(_i) ((0x3 - ((_i) & 0x3)) << 0x3)
120#define AR_WOW_LENGTH1_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN1_SHIFT(_i))
121#define AR_WOW_LEN2_SHIFT(_i) ((0x7 - ((_i) & 0x7)) << 0x3)
122#define AR_WOW_LENGTH2_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN2_SHIFT(_i))
123#define AR_WOW_LEN3_SHIFT(_i) ((0xb - ((_i) & 0xb)) << 0x3)
124#define AR_WOW_LENGTH3_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN3_SHIFT(_i))
125#define AR_WOW_LEN4_SHIFT(_i) ((0xf - ((_i) & 0xf)) << 0x3)
126#define AR_WOW_LENGTH4_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN4_SHIFT(_i))
127
128#endif /* REG_WOW_H */
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 5f30e580d942..8d0b1730a9d5 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -16,36 +16,43 @@
16 16
17#include "ath9k.h" 17#include "ath9k.h"
18 18
19static const struct wiphy_wowlan_support ath9k_wowlan_support = { 19static const struct wiphy_wowlan_support ath9k_wowlan_support_legacy = {
20 .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, 20 .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
21 .n_patterns = MAX_NUM_USER_PATTERN, 21 .n_patterns = MAX_NUM_USER_PATTERN,
22 .pattern_min_len = 1, 22 .pattern_min_len = 1,
23 .pattern_max_len = MAX_PATTERN_SIZE, 23 .pattern_max_len = MAX_PATTERN_SIZE,
24}; 24};
25 25
26static void ath9k_wow_map_triggers(struct ath_softc *sc, 26static const struct wiphy_wowlan_support ath9k_wowlan_support = {
27 struct cfg80211_wowlan *wowlan, 27 .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
28 u32 *wow_triggers) 28 .n_patterns = MAX_NUM_PATTERN - 2,
29 .pattern_min_len = 1,
30 .pattern_max_len = MAX_PATTERN_SIZE,
31};
32
33static u8 ath9k_wow_map_triggers(struct ath_softc *sc,
34 struct cfg80211_wowlan *wowlan)
29{ 35{
36 u8 wow_triggers = 0;
37
30 if (wowlan->disconnect) 38 if (wowlan->disconnect)
31 *wow_triggers |= AH_WOW_LINK_CHANGE | 39 wow_triggers |= AH_WOW_LINK_CHANGE |
32 AH_WOW_BEACON_MISS; 40 AH_WOW_BEACON_MISS;
33 if (wowlan->magic_pkt) 41 if (wowlan->magic_pkt)
34 *wow_triggers |= AH_WOW_MAGIC_PATTERN_EN; 42 wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;
35 43
36 if (wowlan->n_patterns) 44 if (wowlan->n_patterns)
37 *wow_triggers |= AH_WOW_USER_PATTERN_EN; 45 wow_triggers |= AH_WOW_USER_PATTERN_EN;
38
39 sc->wow_enabled = *wow_triggers;
40 46
47 return wow_triggers;
41} 48}
42 49
43static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc) 50static int ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
44{ 51{
45 struct ath_hw *ah = sc->sc_ah; 52 struct ath_hw *ah = sc->sc_ah;
46 struct ath_common *common = ath9k_hw_common(ah); 53 struct ath_common *common = ath9k_hw_common(ah);
47 int pattern_count = 0; 54 int pattern_count = 0;
48 int i, byte_cnt; 55 int ret, i, byte_cnt = 0;
49 u8 dis_deauth_pattern[MAX_PATTERN_SIZE]; 56 u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
50 u8 dis_deauth_mask[MAX_PATTERN_SIZE]; 57 u8 dis_deauth_mask[MAX_PATTERN_SIZE];
51 58
@@ -80,12 +87,7 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
80 * | x:x:x:x:x:x -- 22 bytes 87 * | x:x:x:x:x:x -- 22 bytes
81 */ 88 */
82 89
83 /* Create Disassociate Pattern first */
84
85 byte_cnt = 0;
86
87 /* Fill out the mask with all FF's */ 90 /* Fill out the mask with all FF's */
88
89 for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++) 91 for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++)
90 dis_deauth_mask[i] = 0xff; 92 dis_deauth_mask[i] = 0xff;
91 93
@@ -108,19 +110,17 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
108 byte_cnt += 6; 110 byte_cnt += 6;
109 111
110 /* copy the bssid, its same as the source mac address */ 112 /* copy the bssid, its same as the source mac address */
111
112 memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN); 113 memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
113 114
114 /* Create Disassociate pattern mask */ 115 /* Create Disassociate pattern mask */
115
116 dis_deauth_mask[0] = 0xfe; 116 dis_deauth_mask[0] = 0xfe;
117 dis_deauth_mask[1] = 0x03; 117 dis_deauth_mask[1] = 0x03;
118 dis_deauth_mask[2] = 0xc0; 118 dis_deauth_mask[2] = 0xc0;
119 119
120 ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n"); 120 ret = ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
121 121 pattern_count, byte_cnt);
122 ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask, 122 if (ret)
123 pattern_count, byte_cnt); 123 goto exit;
124 124
125 pattern_count++; 125 pattern_count++;
126 /* 126 /*
@@ -129,59 +129,39 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
129 */ 129 */
130 dis_deauth_pattern[0] = 0xC0; 130 dis_deauth_pattern[0] = 0xC0;
131 131
132 ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask, 132 ret = ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
133 pattern_count, byte_cnt); 133 pattern_count, byte_cnt);
134 134exit:
135 return ret;
135} 136}
136 137
137static void ath9k_wow_add_pattern(struct ath_softc *sc, 138static int ath9k_wow_add_pattern(struct ath_softc *sc,
138 struct cfg80211_wowlan *wowlan) 139 struct cfg80211_wowlan *wowlan)
139{ 140{
140 struct ath_hw *ah = sc->sc_ah; 141 struct ath_hw *ah = sc->sc_ah;
141 struct ath9k_wow_pattern *wow_pattern = NULL;
142 struct cfg80211_pkt_pattern *patterns = wowlan->patterns; 142 struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
143 int mask_len; 143 u8 wow_pattern[MAX_PATTERN_SIZE];
144 u8 wow_mask[MAX_PATTERN_SIZE];
145 int mask_len, ret = 0;
144 s8 i = 0; 146 s8 i = 0;
145 147
146 if (!wowlan->n_patterns)
147 return;
148
149 /*
150 * Add the new user configured patterns
151 */
152 for (i = 0; i < wowlan->n_patterns; i++) { 148 for (i = 0; i < wowlan->n_patterns; i++) {
153 149 mask_len = DIV_ROUND_UP(patterns[i].pattern_len, 8);
154 wow_pattern = kzalloc(sizeof(*wow_pattern), GFP_KERNEL); 150 memset(wow_pattern, 0, MAX_PATTERN_SIZE);
155 151 memset(wow_mask, 0, MAX_PATTERN_SIZE);
156 if (!wow_pattern) 152 memcpy(wow_pattern, patterns[i].pattern, patterns[i].pattern_len);
157 return; 153 memcpy(wow_mask, patterns[i].mask, mask_len);
158 154
159 /* 155 ret = ath9k_hw_wow_apply_pattern(ah,
160 * TODO: convert the generic user space pattern to 156 wow_pattern,
161 * appropriate chip specific/802.11 pattern. 157 wow_mask,
162 */ 158 i + 2,
163 159 patterns[i].pattern_len);
164 mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); 160 if (ret)
165 memset(wow_pattern->pattern_bytes, 0, MAX_PATTERN_SIZE); 161 break;
166 memset(wow_pattern->mask_bytes, 0, MAX_PATTERN_SIZE);
167 memcpy(wow_pattern->pattern_bytes, patterns[i].pattern,
168 patterns[i].pattern_len);
169 memcpy(wow_pattern->mask_bytes, patterns[i].mask, mask_len);
170 wow_pattern->pattern_len = patterns[i].pattern_len;
171
172 /*
173 * just need to take care of deauth and disssoc pattern,
174 * make sure we don't overwrite them.
175 */
176
177 ath9k_hw_wow_apply_pattern(ah, wow_pattern->pattern_bytes,
178 wow_pattern->mask_bytes,
179 i + 2,
180 wow_pattern->pattern_len);
181 kfree(wow_pattern);
182
183 } 162 }
184 163
164 return ret;
185} 165}
186 166
187int ath9k_suspend(struct ieee80211_hw *hw, 167int ath9k_suspend(struct ieee80211_hw *hw,
@@ -190,41 +170,39 @@ int ath9k_suspend(struct ieee80211_hw *hw,
190 struct ath_softc *sc = hw->priv; 170 struct ath_softc *sc = hw->priv;
191 struct ath_hw *ah = sc->sc_ah; 171 struct ath_hw *ah = sc->sc_ah;
192 struct ath_common *common = ath9k_hw_common(ah); 172 struct ath_common *common = ath9k_hw_common(ah);
193 u32 wow_triggers_enabled = 0; 173 u8 triggers;
194 int ret = 0; 174 int ret = 0;
195 175
196 ath9k_deinit_channel_context(sc); 176 ath9k_deinit_channel_context(sc);
197 177
198 mutex_lock(&sc->mutex); 178 mutex_lock(&sc->mutex);
199 179
200 ath_cancel_work(sc);
201 ath_stop_ani(sc);
202
203 if (test_bit(ATH_OP_INVALID, &common->op_flags)) { 180 if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
204 ath_dbg(common, ANY, "Device not present\n"); 181 ath_err(common, "Device not present\n");
205 ret = -EINVAL; 182 ret = -ENODEV;
206 goto fail_wow; 183 goto fail_wow;
207 } 184 }
208 185
209 if (WARN_ON(!wowlan)) { 186 if (WARN_ON(!wowlan)) {
210 ath_dbg(common, WOW, "None of the WoW triggers enabled\n"); 187 ath_err(common, "None of the WoW triggers enabled\n");
211 ret = -EINVAL; 188 ret = -EINVAL;
212 goto fail_wow; 189 goto fail_wow;
213 } 190 }
214 191
215 if (!device_can_wakeup(sc->dev)) { 192 if (sc->cur_chan->nvifs > 1) {
216 ath_dbg(common, WOW, "device_can_wakeup failed, WoW is not enabled\n"); 193 ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
217 ret = 1; 194 ret = 1;
218 goto fail_wow; 195 goto fail_wow;
219 } 196 }
220 197
221 /* 198 if (ath9k_is_chanctx_enabled()) {
222 * none of the sta vifs are associated 199 if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) {
223 * and we are not currently handling multivif 200 ath_dbg(common, WOW,
224 * cases, for instance we have to seperately 201 "Multi-channel WOW is not supported\n");
225 * configure 'keep alive frame' for each 202 ret = 1;
226 * STA. 203 goto fail_wow;
227 */ 204 }
205 }
228 206
229 if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) { 207 if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
230 ath_dbg(common, WOW, "None of the STA vifs are associated\n"); 208 ath_dbg(common, WOW, "None of the STA vifs are associated\n");
@@ -232,16 +210,15 @@ int ath9k_suspend(struct ieee80211_hw *hw,
232 goto fail_wow; 210 goto fail_wow;
233 } 211 }
234 212
235 if (sc->cur_chan->nvifs > 1) { 213 triggers = ath9k_wow_map_triggers(sc, wowlan);
236 ath_dbg(common, WOW, "WoW for multivif is not yet supported\n"); 214 if (!triggers) {
215 ath_dbg(common, WOW, "No valid WoW triggers\n");
237 ret = 1; 216 ret = 1;
238 goto fail_wow; 217 goto fail_wow;
239 } 218 }
240 219
241 ath9k_wow_map_triggers(sc, wowlan, &wow_triggers_enabled); 220 ath_cancel_work(sc);
242 221 ath_stop_ani(sc);
243 ath_dbg(common, WOW, "WoW triggers enabled 0x%x\n",
244 wow_triggers_enabled);
245 222
246 ath9k_ps_wakeup(sc); 223 ath9k_ps_wakeup(sc);
247 224
@@ -251,10 +228,21 @@ int ath9k_suspend(struct ieee80211_hw *hw,
251 * Enable wake up on recieving disassoc/deauth 228 * Enable wake up on recieving disassoc/deauth
252 * frame by default. 229 * frame by default.
253 */ 230 */
254 ath9k_wow_add_disassoc_deauth_pattern(sc); 231 ret = ath9k_wow_add_disassoc_deauth_pattern(sc);
232 if (ret) {
233 ath_err(common,
234 "Unable to add disassoc/deauth pattern: %d\n", ret);
235 goto fail_wow;
236 }
255 237
256 if (wow_triggers_enabled & AH_WOW_USER_PATTERN_EN) 238 if (triggers & AH_WOW_USER_PATTERN_EN) {
257 ath9k_wow_add_pattern(sc, wowlan); 239 ret = ath9k_wow_add_pattern(sc, wowlan);
240 if (ret) {
241 ath_err(common,
242 "Unable to add user pattern: %d\n", ret);
243 goto fail_wow;
244 }
245 }
258 246
259 spin_lock_bh(&sc->sc_pcu_lock); 247 spin_lock_bh(&sc->sc_pcu_lock);
260 /* 248 /*
@@ -278,12 +266,12 @@ int ath9k_suspend(struct ieee80211_hw *hw,
278 synchronize_irq(sc->irq); 266 synchronize_irq(sc->irq);
279 tasklet_kill(&sc->intr_tq); 267 tasklet_kill(&sc->intr_tq);
280 268
281 ath9k_hw_wow_enable(ah, wow_triggers_enabled); 269 ath9k_hw_wow_enable(ah, triggers);
282 270
283 ath9k_ps_restore(sc); 271 ath9k_ps_restore(sc);
284 ath_dbg(common, ANY, "WoW enabled in ath9k\n"); 272 ath_dbg(common, WOW, "Suspend with WoW triggers: 0x%x\n", triggers);
285 atomic_inc(&sc->wow_sleep_proc_intr);
286 273
274 set_bit(ATH_OP_WOW_ENABLED, &common->op_flags);
287fail_wow: 275fail_wow:
288 mutex_unlock(&sc->mutex); 276 mutex_unlock(&sc->mutex);
289 return ret; 277 return ret;
@@ -294,7 +282,7 @@ int ath9k_resume(struct ieee80211_hw *hw)
294 struct ath_softc *sc = hw->priv; 282 struct ath_softc *sc = hw->priv;
295 struct ath_hw *ah = sc->sc_ah; 283 struct ath_hw *ah = sc->sc_ah;
296 struct ath_common *common = ath9k_hw_common(ah); 284 struct ath_common *common = ath9k_hw_common(ah);
297 u32 wow_status; 285 u8 status;
298 286
299 mutex_lock(&sc->mutex); 287 mutex_lock(&sc->mutex);
300 288
@@ -309,29 +297,14 @@ int ath9k_resume(struct ieee80211_hw *hw)
309 297
310 spin_unlock_bh(&sc->sc_pcu_lock); 298 spin_unlock_bh(&sc->sc_pcu_lock);
311 299
312 wow_status = ath9k_hw_wow_wakeup(ah); 300 status = ath9k_hw_wow_wakeup(ah);
313 301 ath_dbg(common, WOW, "Resume with WoW status: 0x%x\n", status);
314 if (atomic_read(&sc->wow_got_bmiss_intr) == 0) {
315 /*
316 * some devices may not pick beacon miss
317 * as the reason they woke up so we add
318 * that here for that shortcoming.
319 */
320 wow_status |= AH_WOW_BEACON_MISS;
321 atomic_dec(&sc->wow_got_bmiss_intr);
322 ath_dbg(common, ANY, "Beacon miss interrupt picked up during WoW sleep\n");
323 }
324
325 atomic_dec(&sc->wow_sleep_proc_intr);
326
327 if (wow_status) {
328 ath_dbg(common, ANY, "Waking up due to WoW triggers %s with WoW status = %x\n",
329 ath9k_hw_wow_event_to_string(wow_status), wow_status);
330 }
331 302
332 ath_restart_work(sc); 303 ath_restart_work(sc);
333 ath9k_start_btcoex(sc); 304 ath9k_start_btcoex(sc);
334 305
306 clear_bit(ATH_OP_WOW_ENABLED, &common->op_flags);
307
335 ath9k_ps_restore(sc); 308 ath9k_ps_restore(sc);
336 mutex_unlock(&sc->mutex); 309 mutex_unlock(&sc->mutex);
337 310
@@ -341,22 +314,35 @@ int ath9k_resume(struct ieee80211_hw *hw)
341void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled) 314void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
342{ 315{
343 struct ath_softc *sc = hw->priv; 316 struct ath_softc *sc = hw->priv;
317 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
344 318
345 mutex_lock(&sc->mutex); 319 mutex_lock(&sc->mutex);
346 device_init_wakeup(sc->dev, 1);
347 device_set_wakeup_enable(sc->dev, enabled); 320 device_set_wakeup_enable(sc->dev, enabled);
348 mutex_unlock(&sc->mutex); 321 mutex_unlock(&sc->mutex);
322
323 ath_dbg(common, WOW, "WoW wakeup source is %s\n",
324 (enabled) ? "enabled" : "disabled");
349} 325}
350 326
351void ath9k_init_wow(struct ieee80211_hw *hw) 327void ath9k_init_wow(struct ieee80211_hw *hw)
352{ 328{
353 struct ath_softc *sc = hw->priv; 329 struct ath_softc *sc = hw->priv;
330 struct ath_hw *ah = sc->sc_ah;
331
332 if ((sc->driver_data & ATH9K_PCI_WOW) || sc->force_wow) {
333 if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565_11_OR_LATER(ah))
334 hw->wiphy->wowlan = &ath9k_wowlan_support;
335 else
336 hw->wiphy->wowlan = &ath9k_wowlan_support_legacy;
354 337
355 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) && 338 device_init_wakeup(sc->dev, 1);
356 (sc->driver_data & ATH9K_PCI_WOW) && 339 }
357 device_can_wakeup(sc->dev)) 340}
358 hw->wiphy->wowlan = &ath9k_wowlan_support; 341
342void ath9k_deinit_wow(struct ieee80211_hw *hw)
343{
344 struct ath_softc *sc = hw->priv;
359 345
360 atomic_set(&sc->wow_sleep_proc_intr, -1); 346 if ((sc->driver_data & ATH9K_PCI_WOW) || sc->force_wow)
361 atomic_set(&sc->wow_got_bmiss_intr, -1); 347 device_init_wakeup(sc->dev, 0);
362} 348}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index e9bd02c2e844..1b8e75c4d2c2 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1097,24 +1097,65 @@ void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
1097} 1097}
1098 1098
1099static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf, 1099static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
1100 u8 rateidx) 1100 u8 rateidx, bool is_40, bool is_cck)
1101{ 1101{
1102 u8 max_power; 1102 u8 max_power;
1103 struct sk_buff *skb;
1104 struct ath_frame_info *fi;
1105 struct ieee80211_tx_info *info;
1103 struct ath_hw *ah = sc->sc_ah; 1106 struct ath_hw *ah = sc->sc_ah;
1104 1107
1105 if (sc->tx99_state) 1108 if (sc->tx99_state || !ah->tpc_enabled)
1106 return MAX_RATE_POWER; 1109 return MAX_RATE_POWER;
1107 1110
1111 skb = bf->bf_mpdu;
1112 fi = get_frame_info(skb);
1113 info = IEEE80211_SKB_CB(skb);
1114
1108 if (!AR_SREV_9300_20_OR_LATER(ah)) { 1115 if (!AR_SREV_9300_20_OR_LATER(ah)) {
1109 /* ar9002 is not sipported for the moment */ 1116 int txpower = fi->tx_power;
1110 return MAX_RATE_POWER;
1111 }
1112 1117
1113 if (!bf->bf_state.bfs_paprd) { 1118 if (is_40) {
1114 struct sk_buff *skb = bf->bf_mpdu; 1119 u8 power_ht40delta;
1115 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1120 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
1116 struct ath_frame_info *fi = get_frame_info(skb); 1121
1122 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_2) {
1123 bool is_2ghz;
1124 struct modal_eep_header *pmodal;
1117 1125
1126 is_2ghz = info->band == IEEE80211_BAND_2GHZ;
1127 pmodal = &eep->modalHeader[is_2ghz];
1128 power_ht40delta = pmodal->ht40PowerIncForPdadc;
1129 } else {
1130 power_ht40delta = 2;
1131 }
1132 txpower += power_ht40delta;
1133 }
1134
1135 if (AR_SREV_9287(ah) || AR_SREV_9285(ah) ||
1136 AR_SREV_9271(ah)) {
1137 txpower -= 2 * AR9287_PWR_TABLE_OFFSET_DB;
1138 } else if (AR_SREV_9280_20_OR_LATER(ah)) {
1139 s8 power_offset;
1140
1141 power_offset = ah->eep_ops->get_eeprom(ah,
1142 EEP_PWR_TABLE_OFFSET);
1143 txpower -= 2 * power_offset;
1144 }
1145
1146 if (OLC_FOR_AR9280_20_LATER && is_cck)
1147 txpower -= 2;
1148
1149 txpower = max(txpower, 0);
1150 max_power = min_t(u8, ah->tx_power[rateidx], txpower);
1151
1152 /* XXX: clamp minimum TX power at 1 for AR9160 since if
1153 * max_power is set to 0, frames are transmitted at max
1154 * TX power
1155 */
1156 if (!max_power && !AR_SREV_9280_20_OR_LATER(ah))
1157 max_power = 1;
1158 } else if (!bf->bf_state.bfs_paprd) {
1118 if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC)) 1159 if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC))
1119 max_power = min(ah->tx_power_stbc[rateidx], 1160 max_power = min(ah->tx_power_stbc[rateidx],
1120 fi->tx_power); 1161 fi->tx_power);
@@ -1152,7 +1193,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
1152 info->rtscts_rate = fi->rtscts_rate; 1193 info->rtscts_rate = fi->rtscts_rate;
1153 1194
1154 for (i = 0; i < ARRAY_SIZE(bf->rates); i++) { 1195 for (i = 0; i < ARRAY_SIZE(bf->rates); i++) {
1155 bool is_40, is_sgi, is_sp; 1196 bool is_40, is_sgi, is_sp, is_cck;
1156 int phy; 1197 int phy;
1157 1198
1158 if (!rates[i].count || (rates[i].idx < 0)) 1199 if (!rates[i].count || (rates[i].idx < 0))
@@ -1198,7 +1239,8 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
1198 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) 1239 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1199 info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC; 1240 info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
1200 1241
1201 info->txpower[i] = ath_get_rate_txpower(sc, bf, rix); 1242 info->txpower[i] = ath_get_rate_txpower(sc, bf, rix,
1243 is_40, false);
1202 continue; 1244 continue;
1203 } 1245 }
1204 1246
@@ -1227,7 +1269,9 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
1227 info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah, 1269 info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1228 phy, rate->bitrate * 100, len, rix, is_sp); 1270 phy, rate->bitrate * 100, len, rix, is_sp);
1229 1271
1230 info->txpower[i] = ath_get_rate_txpower(sc, bf, rix); 1272 is_cck = IS_CCK_RATE(info->rates[i].Rate);
1273 info->txpower[i] = ath_get_rate_txpower(sc, bf, rix, false,
1274 is_cck);
1231 } 1275 }
1232 1276
1233 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ 1277 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
@@ -2259,7 +2303,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
2259 struct ath_txq *txq = txctl->txq; 2303 struct ath_txq *txq = txctl->txq;
2260 struct ath_atx_tid *tid = NULL; 2304 struct ath_atx_tid *tid = NULL;
2261 struct ath_buf *bf; 2305 struct ath_buf *bf;
2262 bool queue, skip_uapsd = false; 2306 bool queue, skip_uapsd = false, ps_resp;
2263 int q, ret; 2307 int q, ret;
2264 2308
2265 if (vif) 2309 if (vif)
@@ -2268,6 +2312,8 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
2268 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) 2312 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
2269 txctl->force_channel = true; 2313 txctl->force_channel = true;
2270 2314
2315 ps_resp = !!(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE);
2316
2271 ret = ath_tx_prepare(hw, skb, txctl); 2317 ret = ath_tx_prepare(hw, skb, txctl);
2272 if (ret) 2318 if (ret)
2273 return ret; 2319 return ret;
@@ -2310,7 +2356,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
2310 if (txctl->an && queue) 2356 if (txctl->an && queue)
2311 tid = ath_get_skb_tid(sc, txctl->an, skb); 2357 tid = ath_get_skb_tid(sc, txctl->an, skb);
2312 2358
2313 if (!skip_uapsd && (info->flags & IEEE80211_TX_CTL_PS_RESPONSE)) { 2359 if (!skip_uapsd && ps_resp) {
2314 ath_txq_unlock(sc, txq); 2360 ath_txq_unlock(sc, txq);
2315 txq = sc->tx.uapsdq; 2361 txq = sc->tx.uapsdq;
2316 ath_txq_lock(sc, txq); 2362 ath_txq_lock(sc, txq);
@@ -2443,9 +2489,12 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
2443 if (sc->sc_ah->caldata) 2489 if (sc->sc_ah->caldata)
2444 set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags); 2490 set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
2445 2491
2446 if (!(tx_flags & ATH_TX_ERROR)) 2492 if (!(tx_flags & ATH_TX_ERROR)) {
2447 /* Frame was ACKed */ 2493 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
2448 tx_info->flags |= IEEE80211_TX_STAT_ACK; 2494 tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
2495 else
2496 tx_info->flags |= IEEE80211_TX_STAT_ACK;
2497 }
2449 2498
2450 padpos = ieee80211_hdrlen(hdr->frame_control); 2499 padpos = ieee80211_hdrlen(hdr->frame_control);
2451 padsize = padpos & 3; 2500 padsize = padpos & 3;
diff --git a/drivers/net/wireless/ath/carl9170/cmd.c b/drivers/net/wireless/ath/carl9170/cmd.c
index 39a63874b275..f2b4f537e4c1 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.c
+++ b/drivers/net/wireless/ath/carl9170/cmd.c
@@ -188,12 +188,12 @@ int carl9170_collect_tally(struct ar9170 *ar)
188 188
189 if (ar->channel) { 189 if (ar->channel) {
190 info = &ar->survey[ar->channel->hw_value]; 190 info = &ar->survey[ar->channel->hw_value];
191 info->channel_time = ar->tally.active; 191 info->time = ar->tally.active;
192 info->channel_time_busy = ar->tally.cca; 192 info->time_busy = ar->tally.cca;
193 info->channel_time_tx = ar->tally.tx_time; 193 info->time_tx = ar->tally.tx_time;
194 do_div(info->channel_time, 1000); 194 do_div(info->time, 1000);
195 do_div(info->channel_time_busy, 1000); 195 do_div(info->time_busy, 1000);
196 do_div(info->channel_time_tx, 1000); 196 do_div(info->time_tx, 1000);
197 } 197 }
198 } 198 }
199 return 0; 199 return 0;
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index ef5b6dc7b7f1..f1455a04cb62 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1690,9 +1690,9 @@ found:
1690 survey->filled |= SURVEY_INFO_IN_USE; 1690 survey->filled |= SURVEY_INFO_IN_USE;
1691 1691
1692 if (ar->fw.hw_counters) { 1692 if (ar->fw.hw_counters) {
1693 survey->filled |= SURVEY_INFO_CHANNEL_TIME | 1693 survey->filled |= SURVEY_INFO_TIME |
1694 SURVEY_INFO_CHANNEL_TIME_BUSY | 1694 SURVEY_INFO_TIME_BUSY |
1695 SURVEY_INFO_CHANNEL_TIME_TX; 1695 SURVEY_INFO_TIME_TX;
1696 } 1696 }
1697 1697
1698 return 0; 1698 return 0;
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index cfd0554cf140..3d57f8772389 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -86,7 +86,7 @@ static const struct radar_detector_specs fcc_radar_ref_types[] = {
86 FCC_PATTERN(1, 0, 5, 150, 230, 1, 23), 86 FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
87 FCC_PATTERN(2, 6, 10, 200, 500, 1, 16), 87 FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
88 FCC_PATTERN(3, 11, 20, 200, 500, 1, 12), 88 FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
89 FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 20), 89 FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 1),
90 FCC_PATTERN(5, 0, 1, 333, 333, 1, 9), 90 FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
91}; 91};
92 92
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 73f12f196f14..086549b732b9 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -84,6 +84,7 @@ static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
84 if (!cur_ctl) 84 if (!cur_ctl)
85 goto out_fail; 85 goto out_fail;
86 86
87 spin_lock_init(&cur_ctl->skb_lock);
87 cur_ctl->ctl_blk_order = i; 88 cur_ctl->ctl_blk_order = i;
88 if (i == 0) { 89 if (i == 0) {
89 ch->head_blk_ctl = cur_ctl; 90 ch->head_blk_ctl = cur_ctl;
@@ -354,6 +355,8 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
354 * and while-do will not make any cycles. 355 * and while-do will not make any cycles.
355 */ 356 */
356 do { 357 do {
358 if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
359 break;
357 if (ctl->skb) { 360 if (ctl->skb) {
358 dma_unmap_single(NULL, ctl->desc->src_addr_l, 361 dma_unmap_single(NULL, ctl->desc->src_addr_l,
359 ctl->skb->len, DMA_TO_DEVICE); 362 ctl->skb->len, DMA_TO_DEVICE);
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 7dd8873f757e..0783d2ed8238 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -298,6 +298,8 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
298 wcn36xx_debugfs_init(wcn); 298 wcn36xx_debugfs_init(wcn);
299 299
300 INIT_LIST_HEAD(&wcn->vif_list); 300 INIT_LIST_HEAD(&wcn->vif_list);
301 spin_lock_init(&wcn->dxe_lock);
302
301 return 0; 303 return 0;
302 304
303out_smd_stop: 305out_smd_stop:
@@ -795,6 +797,7 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
795 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n", 797 wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
796 vif, sta->addr); 798 vif, sta->addr);
797 799
800 spin_lock_init(&sta_priv->ampdu_lock);
798 vif_priv->sta = sta_priv; 801 vif_priv->sta = sta_priv;
799 sta_priv->vif = vif_priv; 802 sta_priv->vif = vif_priv;
800 /* 803 /*
@@ -873,21 +876,32 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
873 get_sta_index(vif, sta_priv)); 876 get_sta_index(vif, sta_priv));
874 wcn36xx_smd_add_ba(wcn); 877 wcn36xx_smd_add_ba(wcn);
875 wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv)); 878 wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv));
876 ieee80211_start_tx_ba_session(sta, tid, 0);
877 break; 879 break;
878 case IEEE80211_AMPDU_RX_STOP: 880 case IEEE80211_AMPDU_RX_STOP:
879 wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv)); 881 wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv));
880 break; 882 break;
881 case IEEE80211_AMPDU_TX_START: 883 case IEEE80211_AMPDU_TX_START:
884 spin_lock_bh(&sta_priv->ampdu_lock);
885 sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_START;
886 spin_unlock_bh(&sta_priv->ampdu_lock);
887
882 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 888 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
883 break; 889 break;
884 case IEEE80211_AMPDU_TX_OPERATIONAL: 890 case IEEE80211_AMPDU_TX_OPERATIONAL:
891 spin_lock_bh(&sta_priv->ampdu_lock);
892 sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_OPERATIONAL;
893 spin_unlock_bh(&sta_priv->ampdu_lock);
894
885 wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1, 895 wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
886 get_sta_index(vif, sta_priv)); 896 get_sta_index(vif, sta_priv));
887 break; 897 break;
888 case IEEE80211_AMPDU_TX_STOP_FLUSH: 898 case IEEE80211_AMPDU_TX_STOP_FLUSH:
889 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 899 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
890 case IEEE80211_AMPDU_TX_STOP_CONT: 900 case IEEE80211_AMPDU_TX_STOP_CONT:
901 spin_lock_bh(&sta_priv->ampdu_lock);
902 sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_NONE;
903 spin_unlock_bh(&sta_priv->ampdu_lock);
904
891 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 905 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
892 break; 906 break;
893 default: 907 default:
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 63986931829e..69ed39731902 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -21,6 +21,61 @@
21#include <linux/bitops.h> 21#include <linux/bitops.h>
22#include "smd.h" 22#include "smd.h"
23 23
24struct wcn36xx_cfg_val {
25 u32 cfg_id;
26 u32 value;
27};
28
29#define WCN36XX_CFG_VAL(id, val) \
30{ \
31 .cfg_id = WCN36XX_HAL_CFG_ ## id, \
32 .value = val \
33}
34
35static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = {
36 WCN36XX_CFG_VAL(CURRENT_TX_ANTENNA, 1),
37 WCN36XX_CFG_VAL(CURRENT_RX_ANTENNA, 1),
38 WCN36XX_CFG_VAL(LOW_GAIN_OVERRIDE, 0),
39 WCN36XX_CFG_VAL(POWER_STATE_PER_CHAIN, 785),
40 WCN36XX_CFG_VAL(CAL_PERIOD, 5),
41 WCN36XX_CFG_VAL(CAL_CONTROL, 1),
42 WCN36XX_CFG_VAL(PROXIMITY, 0),
43 WCN36XX_CFG_VAL(NETWORK_DENSITY, 3),
44 WCN36XX_CFG_VAL(MAX_MEDIUM_TIME, 6000),
45 WCN36XX_CFG_VAL(MAX_MPDUS_IN_AMPDU, 64),
46 WCN36XX_CFG_VAL(RTS_THRESHOLD, 2347),
47 WCN36XX_CFG_VAL(SHORT_RETRY_LIMIT, 6),
48 WCN36XX_CFG_VAL(LONG_RETRY_LIMIT, 6),
49 WCN36XX_CFG_VAL(FRAGMENTATION_THRESHOLD, 8000),
50 WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ZERO, 5),
51 WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ONE, 10),
52 WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_TWO, 15),
53 WCN36XX_CFG_VAL(FIXED_RATE, 0),
54 WCN36XX_CFG_VAL(RETRYRATE_POLICY, 4),
55 WCN36XX_CFG_VAL(RETRYRATE_SECONDARY, 0),
56 WCN36XX_CFG_VAL(RETRYRATE_TERTIARY, 0),
57 WCN36XX_CFG_VAL(FORCE_POLICY_PROTECTION, 5),
58 WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_24GHZ, 1),
59 WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_5GHZ, 5),
60 WCN36XX_CFG_VAL(DEFAULT_RATE_INDEX_5GHZ, 5),
61 WCN36XX_CFG_VAL(MAX_BA_SESSIONS, 40),
62 WCN36XX_CFG_VAL(PS_DATA_INACTIVITY_TIMEOUT, 200),
63 WCN36XX_CFG_VAL(PS_ENABLE_BCN_FILTER, 1),
64 WCN36XX_CFG_VAL(PS_ENABLE_RSSI_MONITOR, 1),
65 WCN36XX_CFG_VAL(NUM_BEACON_PER_RSSI_AVERAGE, 20),
66 WCN36XX_CFG_VAL(STATS_PERIOD, 10),
67 WCN36XX_CFG_VAL(CFP_MAX_DURATION, 30000),
68 WCN36XX_CFG_VAL(FRAME_TRANS_ENABLED, 0),
69 WCN36XX_CFG_VAL(BA_THRESHOLD_HIGH, 128),
70 WCN36XX_CFG_VAL(MAX_BA_BUFFERS, 2560),
71 WCN36XX_CFG_VAL(DYNAMIC_PS_POLL_VALUE, 0),
72 WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1),
73 WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1),
74 WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0),
75 WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
76 WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
77};
78
24static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value) 79static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value)
25{ 80{
26 struct wcn36xx_hal_cfg *entry; 81 struct wcn36xx_hal_cfg *entry;
@@ -357,8 +412,10 @@ static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
357 412
358int wcn36xx_smd_start(struct wcn36xx *wcn) 413int wcn36xx_smd_start(struct wcn36xx *wcn)
359{ 414{
360 struct wcn36xx_hal_mac_start_req_msg msg_body; 415 struct wcn36xx_hal_mac_start_req_msg msg_body, *body;
361 int ret = 0; 416 int ret = 0;
417 int i;
418 size_t len;
362 419
363 mutex_lock(&wcn->hal_mutex); 420 mutex_lock(&wcn->hal_mutex);
364 INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ); 421 INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ);
@@ -368,10 +425,22 @@ int wcn36xx_smd_start(struct wcn36xx *wcn)
368 425
369 PREPARE_HAL_BUF(wcn->hal_buf, msg_body); 426 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
370 427
428 body = (struct wcn36xx_hal_mac_start_req_msg *)wcn->hal_buf;
429 len = body->header.len;
430
431 for (i = 0; i < ARRAY_SIZE(wcn36xx_cfg_vals); i++) {
432 ret = put_cfg_tlv_u32(wcn, &len, wcn36xx_cfg_vals[i].cfg_id,
433 wcn36xx_cfg_vals[i].value);
434 if (ret)
435 goto out;
436 }
437 body->header.len = len;
438 body->params.len = len - sizeof(*body);
439
371 wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start type %d\n", 440 wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start type %d\n",
372 msg_body.params.type); 441 msg_body.params.type);
373 442
374 ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len); 443 ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
375 if (ret) { 444 if (ret) {
376 wcn36xx_err("Sending hal_start failed\n"); 445 wcn36xx_err("Sending hal_start failed\n");
377 goto out; 446 goto out;
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 32bb26a0db2a..9bec8237231d 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -93,6 +93,7 @@ static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
93 bd->pdu.mpdu_header_off; 93 bd->pdu.mpdu_header_off;
94 bd->pdu.mpdu_len = len; 94 bd->pdu.mpdu_len = len;
95 bd->pdu.tid = tid; 95 bd->pdu.tid = tid;
96 bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_QOS;
96} 97}
97 98
98static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn, 99static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
@@ -110,15 +111,54 @@ static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
110 wcn36xx_warn("vif %pM not found\n", addr); 111 wcn36xx_warn("vif %pM not found\n", addr);
111 return NULL; 112 return NULL;
112} 113}
114
115static void wcn36xx_tx_start_ampdu(struct wcn36xx *wcn,
116 struct wcn36xx_sta *sta_priv,
117 struct sk_buff *skb)
118{
119 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
120 struct ieee80211_sta *sta;
121 u8 *qc, tid;
122
123 if (!conf_is_ht(&wcn->hw->conf))
124 return;
125
126 sta = wcn36xx_priv_to_sta(sta_priv);
127
128 if (WARN_ON(!ieee80211_is_data_qos(hdr->frame_control)))
129 return;
130
131 if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
132 return;
133
134 qc = ieee80211_get_qos_ctl(hdr);
135 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
136
137 spin_lock(&sta_priv->ampdu_lock);
138 if (sta_priv->ampdu_state[tid] != WCN36XX_AMPDU_NONE)
139 goto out_unlock;
140
141 if (sta_priv->non_agg_frame_ct++ >= WCN36XX_AMPDU_START_THRESH) {
142 sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_START;
143 sta_priv->non_agg_frame_ct = 0;
144 ieee80211_start_tx_ba_session(sta, tid, 0);
145 }
146out_unlock:
147 spin_unlock(&sta_priv->ampdu_lock);
148}
149
113static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd, 150static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
114 struct wcn36xx *wcn, 151 struct wcn36xx *wcn,
115 struct wcn36xx_vif **vif_priv, 152 struct wcn36xx_vif **vif_priv,
116 struct wcn36xx_sta *sta_priv, 153 struct wcn36xx_sta *sta_priv,
117 struct ieee80211_hdr *hdr, 154 struct sk_buff *skb,
118 bool bcast) 155 bool bcast)
119{ 156{
157 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
120 struct ieee80211_vif *vif = NULL; 158 struct ieee80211_vif *vif = NULL;
121 struct wcn36xx_vif *__vif_priv = NULL; 159 struct wcn36xx_vif *__vif_priv = NULL;
160 bool is_data_qos;
161
122 bd->bd_rate = WCN36XX_BD_RATE_DATA; 162 bd->bd_rate = WCN36XX_BD_RATE_DATA;
123 163
124 /* 164 /*
@@ -157,14 +197,26 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
157 bd->ack_policy = 1; 197 bd->ack_policy = 1;
158 } 198 }
159 *vif_priv = __vif_priv; 199 *vif_priv = __vif_priv;
200
201 is_data_qos = ieee80211_is_data_qos(hdr->frame_control);
202
203 wcn36xx_set_tx_pdu(bd,
204 is_data_qos ?
205 sizeof(struct ieee80211_qos_hdr) :
206 sizeof(struct ieee80211_hdr_3addr),
207 skb->len, sta_priv ? sta_priv->tid : 0);
208
209 if (sta_priv && is_data_qos)
210 wcn36xx_tx_start_ampdu(wcn, sta_priv, skb);
160} 211}
161 212
162static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd, 213static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
163 struct wcn36xx *wcn, 214 struct wcn36xx *wcn,
164 struct wcn36xx_vif **vif_priv, 215 struct wcn36xx_vif **vif_priv,
165 struct ieee80211_hdr *hdr, 216 struct sk_buff *skb,
166 bool bcast) 217 bool bcast)
167{ 218{
219 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
168 struct wcn36xx_vif *__vif_priv = 220 struct wcn36xx_vif *__vif_priv =
169 get_vif_by_addr(wcn, hdr->addr2); 221 get_vif_by_addr(wcn, hdr->addr2);
170 bd->sta_index = __vif_priv->self_sta_index; 222 bd->sta_index = __vif_priv->self_sta_index;
@@ -198,6 +250,12 @@ static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
198 } else 250 } else
199 bd->queue_id = WCN36XX_TX_U_WQ_ID; 251 bd->queue_id = WCN36XX_TX_U_WQ_ID;
200 *vif_priv = __vif_priv; 252 *vif_priv = __vif_priv;
253
254 wcn36xx_set_tx_pdu(bd,
255 ieee80211_is_data_qos(hdr->frame_control) ?
256 sizeof(struct ieee80211_qos_hdr) :
257 sizeof(struct ieee80211_hdr_3addr),
258 skb->len, WCN36XX_TID);
201} 259}
202 260
203int wcn36xx_start_tx(struct wcn36xx *wcn, 261int wcn36xx_start_tx(struct wcn36xx *wcn,
@@ -237,7 +295,7 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
237 295
238 bd->dpu_rf = WCN36XX_BMU_WQ_TX; 296 bd->dpu_rf = WCN36XX_BMU_WQ_TX;
239 297
240 bd->tx_comp = info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS; 298 bd->tx_comp = !!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS);
241 if (bd->tx_comp) { 299 if (bd->tx_comp) {
242 wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n"); 300 wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
243 spin_lock_irqsave(&wcn->dxe_lock, flags); 301 spin_lock_irqsave(&wcn->dxe_lock, flags);
@@ -259,22 +317,11 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
259 } 317 }
260 318
261 /* Data frames served first*/ 319 /* Data frames served first*/
262 if (is_low) { 320 if (is_low)
263 wcn36xx_set_tx_data(bd, wcn, &vif_priv, sta_priv, hdr, bcast); 321 wcn36xx_set_tx_data(bd, wcn, &vif_priv, sta_priv, skb, bcast);
264 wcn36xx_set_tx_pdu(bd, 322 else
265 ieee80211_is_data_qos(hdr->frame_control) ?
266 sizeof(struct ieee80211_qos_hdr) :
267 sizeof(struct ieee80211_hdr_3addr),
268 skb->len, sta_priv ? sta_priv->tid : 0);
269 } else {
270 /* MGMT and CTRL frames are handeld here*/ 323 /* MGMT and CTRL frames are handeld here*/
271 wcn36xx_set_tx_mgmt(bd, wcn, &vif_priv, hdr, bcast); 324 wcn36xx_set_tx_mgmt(bd, wcn, &vif_priv, skb, bcast);
272 wcn36xx_set_tx_pdu(bd,
273 ieee80211_is_data_qos(hdr->frame_control) ?
274 sizeof(struct ieee80211_qos_hdr) :
275 sizeof(struct ieee80211_hdr_3addr),
276 skb->len, WCN36XX_TID);
277 }
278 325
279 buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32)); 326 buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
280 bd->tx_bd_sign = 0xbdbdbdbd; 327 bd->tx_bd_sign = 0xbdbdbdbd;
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h
index bbfbcf808c77..032216e82b2b 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.h
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.h
@@ -32,6 +32,12 @@
32#define WCN36XX_BD_RATE_MGMT 2 32#define WCN36XX_BD_RATE_MGMT 2
33#define WCN36XX_BD_RATE_CTRL 3 33#define WCN36XX_BD_RATE_CTRL 3
34 34
35enum wcn36xx_txbd_ssn_type {
36 WCN36XX_TXBD_SSN_FILL_HOST = 0,
37 WCN36XX_TXBD_SSN_FILL_DPU_NON_QOS = 1,
38 WCN36XX_TXBD_SSN_FILL_DPU_QOS = 2,
39};
40
35struct wcn36xx_pdu { 41struct wcn36xx_pdu {
36 u32 dpu_fb:8; 42 u32 dpu_fb:8;
37 u32 adu_fb:8; 43 u32 adu_fb:8;
@@ -50,7 +56,8 @@ struct wcn36xx_pdu {
50 /* 0x0c*/ 56 /* 0x0c*/
51 u32 reserved4:8; 57 u32 reserved4:8;
52 u32 tid:4; 58 u32 tid:4;
53 u32 reserved3:4; 59 u32 bd_ssn:2;
60 u32 reserved3:2;
54 u32 mpdu_len:16; 61 u32 mpdu_len:16;
55}; 62};
56 63
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index f0fb81dfd17b..7b41e833e18c 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -32,6 +32,9 @@
32#define WLAN_NV_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin" 32#define WLAN_NV_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
33#define WCN36XX_AGGR_BUFFER_SIZE 64 33#define WCN36XX_AGGR_BUFFER_SIZE 64
34 34
35/* How many frames until we start a-mpdu TX session */
36#define WCN36XX_AMPDU_START_THRESH 20
37
35extern unsigned int wcn36xx_dbg_mask; 38extern unsigned int wcn36xx_dbg_mask;
36 39
37enum wcn36xx_debug_mask { 40enum wcn36xx_debug_mask {
@@ -74,6 +77,13 @@ enum wcn36xx_debug_mask {
74 buf, len, false); \ 77 buf, len, false); \
75} while (0) 78} while (0)
76 79
80enum wcn36xx_ampdu_state {
81 WCN36XX_AMPDU_NONE,
82 WCN36XX_AMPDU_INIT,
83 WCN36XX_AMPDU_START,
84 WCN36XX_AMPDU_OPERATIONAL,
85};
86
77#define WCN36XX_HW_CHANNEL(__wcn) (__wcn->hw->conf.chandef.chan->hw_value) 87#define WCN36XX_HW_CHANNEL(__wcn) (__wcn->hw->conf.chandef.chan->hw_value)
78#define WCN36XX_BAND(__wcn) (__wcn->hw->conf.chandef.chan->band) 88#define WCN36XX_BAND(__wcn) (__wcn->hw->conf.chandef.chan->band)
79#define WCN36XX_CENTER_FREQ(__wcn) (__wcn->hw->conf.chandef.chan->center_freq) 89#define WCN36XX_CENTER_FREQ(__wcn) (__wcn->hw->conf.chandef.chan->center_freq)
@@ -165,6 +175,10 @@ struct wcn36xx_sta {
165 bool is_data_encrypted; 175 bool is_data_encrypted;
166 /* Rates */ 176 /* Rates */
167 struct wcn36xx_hal_supported_rates supported_rates; 177 struct wcn36xx_hal_supported_rates supported_rates;
178
179 spinlock_t ampdu_lock; /* protects next two fields */
180 enum wcn36xx_ampdu_state ampdu_state[16];
181 int non_agg_frame_ct;
168}; 182};
169struct wcn36xx_dxe_ch; 183struct wcn36xx_dxe_ch;
170struct wcn36xx { 184struct wcn36xx {
@@ -243,4 +257,10 @@ static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
243} 257}
244void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates); 258void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates);
245 259
260static inline
261struct ieee80211_sta *wcn36xx_priv_to_sta(struct wcn36xx_sta *sta_priv)
262{
263 return container_of((void *)sta_priv, struct ieee80211_sta, drv_priv);
264}
265
246#endif /* _WCN36XX_H_ */ 266#endif /* _WCN36XX_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index 481680a3aa55..ce8c0381825e 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -39,12 +39,3 @@ config WIL6210_TRACING
39 option if you are interested in debugging the driver. 39 option if you are interested in debugging the driver.
40 40
41 If unsure, say Y to make it easier to debug problems. 41 If unsure, say Y to make it easier to debug problems.
42
43config WIL6210_PLATFORM_MSM
44 bool "wil6210 MSM platform specific support"
45 depends on WIL6210
46 depends on ARCH_MSM
47 default y
48 ---help---
49 Say Y here to enable wil6210 driver support for MSM
50 platform specific features
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 8ad4b5f97e04..caa717bf52f3 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -14,7 +14,6 @@ wil6210-y += ioctl.o
14wil6210-y += fw.o 14wil6210-y += fw.o
15wil6210-$(CONFIG_WIL6210_TRACING) += trace.o 15wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
16wil6210-y += wil_platform.o 16wil6210-y += wil_platform.o
17wil6210-$(CONFIG_WIL6210_PLATFORM_MSM) += wil_platform_msm.o
18wil6210-y += ethtool.o 17wil6210-y += ethtool.o
19 18
20# for tracing framework to find trace.h 19# for tracing framework to find trace.h
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 38332a6dfb3a..2d5ea21be47e 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. 2 * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -142,14 +142,14 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
142 142
143 sinfo->generation = wil->sinfo_gen; 143 sinfo->generation = wil->sinfo_gen;
144 144
145 sinfo->filled = STATION_INFO_RX_BYTES | 145 sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) |
146 STATION_INFO_TX_BYTES | 146 BIT(NL80211_STA_INFO_TX_BYTES) |
147 STATION_INFO_RX_PACKETS | 147 BIT(NL80211_STA_INFO_RX_PACKETS) |
148 STATION_INFO_TX_PACKETS | 148 BIT(NL80211_STA_INFO_TX_PACKETS) |
149 STATION_INFO_RX_BITRATE | 149 BIT(NL80211_STA_INFO_RX_BITRATE) |
150 STATION_INFO_TX_BITRATE | 150 BIT(NL80211_STA_INFO_TX_BITRATE) |
151 STATION_INFO_RX_DROP_MISC | 151 BIT(NL80211_STA_INFO_RX_DROP_MISC) |
152 STATION_INFO_TX_FAILED; 152 BIT(NL80211_STA_INFO_TX_FAILED);
153 153
154 sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G; 154 sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
155 sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs); 155 sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
@@ -162,8 +162,8 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
162 sinfo->tx_packets = stats->tx_packets; 162 sinfo->tx_packets = stats->tx_packets;
163 sinfo->tx_failed = stats->tx_errors; 163 sinfo->tx_failed = stats->tx_errors;
164 164
165 if (test_bit(wil_status_fwconnected, &wil->status)) { 165 if (test_bit(wil_status_fwconnected, wil->status)) {
166 sinfo->filled |= STATION_INFO_SIGNAL; 166 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
167 sinfo->signal = reply.evt.sqi; 167 sinfo->signal = reply.evt.sqi;
168 } 168 }
169 169
@@ -282,7 +282,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
282 } 282 }
283 283
284 /* FW don't support scan after connection attempt */ 284 /* FW don't support scan after connection attempt */
285 if (test_bit(wil_status_dontscan, &wil->status)) { 285 if (test_bit(wil_status_dontscan, wil->status)) {
286 wil_err(wil, "Can't scan now\n"); 286 wil_err(wil, "Can't scan now\n");
287 return -EBUSY; 287 return -EBUSY;
288 } 288 }
@@ -334,6 +334,30 @@ out:
334 return rc; 334 return rc;
335} 335}
336 336
337static void wil_print_crypto(struct wil6210_priv *wil,
338 struct cfg80211_crypto_settings *c)
339{
340 int i, n;
341
342 wil_dbg_misc(wil, "WPA versions: 0x%08x cipher group 0x%08x\n",
343 c->wpa_versions, c->cipher_group);
344 wil_dbg_misc(wil, "Pairwise ciphers [%d] {\n", c->n_ciphers_pairwise);
345 n = min_t(int, c->n_ciphers_pairwise, ARRAY_SIZE(c->ciphers_pairwise));
346 for (i = 0; i < n; i++)
347 wil_dbg_misc(wil, " [%d] = 0x%08x\n", i,
348 c->ciphers_pairwise[i]);
349 wil_dbg_misc(wil, "}\n");
350 wil_dbg_misc(wil, "AKM suites [%d] {\n", c->n_akm_suites);
351 n = min_t(int, c->n_akm_suites, ARRAY_SIZE(c->akm_suites));
352 for (i = 0; i < n; i++)
353 wil_dbg_misc(wil, " [%d] = 0x%08x\n", i,
354 c->akm_suites[i]);
355 wil_dbg_misc(wil, "}\n");
356 wil_dbg_misc(wil, "Control port : %d, eth_type 0x%04x no_encrypt %d\n",
357 c->control_port, be16_to_cpu(c->control_port_ethertype),
358 c->control_port_no_encrypt);
359}
360
337static void wil_print_connect_params(struct wil6210_priv *wil, 361static void wil_print_connect_params(struct wil6210_priv *wil,
338 struct cfg80211_connect_params *sme) 362 struct cfg80211_connect_params *sme)
339{ 363{
@@ -348,6 +372,7 @@ static void wil_print_connect_params(struct wil6210_priv *wil,
348 print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET, 372 print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET,
349 16, 1, sme->ssid, sme->ssid_len, true); 373 16, 1, sme->ssid, sme->ssid_len, true);
350 wil_info(wil, " Privacy: %s\n", sme->privacy ? "secure" : "open"); 374 wil_info(wil, " Privacy: %s\n", sme->privacy ? "secure" : "open");
375 wil_print_crypto(wil, &sme->crypto);
351} 376}
352 377
353static int wil_cfg80211_connect(struct wiphy *wiphy, 378static int wil_cfg80211_connect(struct wiphy *wiphy,
@@ -362,8 +387,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
362 int ch; 387 int ch;
363 int rc = 0; 388 int rc = 0;
364 389
365 if (test_bit(wil_status_fwconnecting, &wil->status) || 390 if (test_bit(wil_status_fwconnecting, wil->status) ||
366 test_bit(wil_status_fwconnected, &wil->status)) 391 test_bit(wil_status_fwconnected, wil->status))
367 return -EALREADY; 392 return -EALREADY;
368 393
369 wil_print_connect_params(wil, sme); 394 wil_print_connect_params(wil, sme);
@@ -450,15 +475,16 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
450 memcpy(conn.bssid, bss->bssid, ETH_ALEN); 475 memcpy(conn.bssid, bss->bssid, ETH_ALEN);
451 memcpy(conn.dst_mac, bss->bssid, ETH_ALEN); 476 memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
452 477
453 set_bit(wil_status_fwconnecting, &wil->status); 478 set_bit(wil_status_fwconnecting, wil->status);
454 479
455 rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn)); 480 rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
456 if (rc == 0) { 481 if (rc == 0) {
482 netif_carrier_on(ndev);
457 /* Connect can take lots of time */ 483 /* Connect can take lots of time */
458 mod_timer(&wil->connect_timer, 484 mod_timer(&wil->connect_timer,
459 jiffies + msecs_to_jiffies(2000)); 485 jiffies + msecs_to_jiffies(2000));
460 } else { 486 } else {
461 clear_bit(wil_status_fwconnecting, &wil->status); 487 clear_bit(wil_status_fwconnecting, wil->status);
462 } 488 }
463 489
464 out: 490 out:
@@ -618,18 +644,6 @@ static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
618 b->assocresp_ies, b->assocresp_ies_len); 644 b->assocresp_ies, b->assocresp_ies_len);
619} 645}
620 646
621static void wil_print_crypto(struct wil6210_priv *wil,
622 struct cfg80211_crypto_settings *c)
623{
624 wil_dbg_misc(wil, "WPA versions: 0x%08x cipher group 0x%08x\n",
625 c->wpa_versions, c->cipher_group);
626 wil_dbg_misc(wil, "Pairwise ciphers [%d]\n", c->n_ciphers_pairwise);
627 wil_dbg_misc(wil, "AKM suites [%d]\n", c->n_akm_suites);
628 wil_dbg_misc(wil, "Control port : %d, eth_type 0x%04x no_encrypt %d\n",
629 c->control_port, be16_to_cpu(c->control_port_ethertype),
630 c->control_port_no_encrypt);
631}
632
633static int wil_fix_bcon(struct wil6210_priv *wil, 647static int wil_fix_bcon(struct wil6210_priv *wil,
634 struct cfg80211_beacon_data *bcon) 648 struct cfg80211_beacon_data *bcon)
635{ 649{
@@ -757,12 +771,12 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
757 771
758 wil->secure_pcp = info->privacy; 772 wil->secure_pcp = info->privacy;
759 773
774 netif_carrier_on(ndev);
775
760 rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype, 776 rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
761 channel->hw_value); 777 channel->hw_value);
762 if (rc) 778 if (rc)
763 goto out; 779 netif_carrier_off(ndev);
764
765 netif_carrier_on(ndev);
766 780
767out: 781out:
768 mutex_unlock(&wil->mutex); 782 mutex_unlock(&wil->mutex);
@@ -772,23 +786,26 @@ out:
772static int wil_cfg80211_stop_ap(struct wiphy *wiphy, 786static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
773 struct net_device *ndev) 787 struct net_device *ndev)
774{ 788{
775 int rc, rc1;
776 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 789 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
777 790
778 wil_dbg_misc(wil, "%s()\n", __func__); 791 wil_dbg_misc(wil, "%s()\n", __func__);
779 792
793 netif_carrier_off(ndev);
780 wil_set_recovery_state(wil, fw_recovery_idle); 794 wil_set_recovery_state(wil, fw_recovery_idle);
781 795
782 mutex_lock(&wil->mutex); 796 mutex_lock(&wil->mutex);
783 797
784 rc = wmi_pcp_stop(wil); 798 wmi_pcp_stop(wil);
785 799
786 __wil_down(wil); 800 __wil_down(wil);
787 rc1 = __wil_up(wil); 801 __wil_up(wil);
788 802
789 mutex_unlock(&wil->mutex); 803 mutex_unlock(&wil->mutex);
790 804
791 return min(rc, rc1); 805 /* some functions above might fail (e.g. __wil_up). Nevertheless, we
806 * return success because AP has stopped
807 */
808 return 0;
792} 809}
793 810
794static int wil_cfg80211_del_station(struct wiphy *wiphy, 811static int wil_cfg80211_del_station(struct wiphy *wiphy,
@@ -804,6 +821,96 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy,
804 return 0; 821 return 0;
805} 822}
806 823
824/* probe_client handling */
825static void wil_probe_client_handle(struct wil6210_priv *wil,
826 struct wil_probe_client_req *req)
827{
828 struct net_device *ndev = wil_to_ndev(wil);
829 struct wil_sta_info *sta = &wil->sta[req->cid];
830 /* assume STA is alive if it is still connected,
831 * else FW will disconnect it
832 */
833 bool alive = (sta->status == wil_sta_connected);
834
835 cfg80211_probe_status(ndev, sta->addr, req->cookie, alive, GFP_KERNEL);
836}
837
838static struct list_head *next_probe_client(struct wil6210_priv *wil)
839{
840 struct list_head *ret = NULL;
841
842 mutex_lock(&wil->probe_client_mutex);
843
844 if (!list_empty(&wil->probe_client_pending)) {
845 ret = wil->probe_client_pending.next;
846 list_del(ret);
847 }
848
849 mutex_unlock(&wil->probe_client_mutex);
850
851 return ret;
852}
853
854void wil_probe_client_worker(struct work_struct *work)
855{
856 struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
857 probe_client_worker);
858 struct wil_probe_client_req *req;
859 struct list_head *lh;
860
861 while ((lh = next_probe_client(wil)) != NULL) {
862 req = list_entry(lh, struct wil_probe_client_req, list);
863
864 wil_probe_client_handle(wil, req);
865 kfree(req);
866 }
867}
868
869void wil_probe_client_flush(struct wil6210_priv *wil)
870{
871 struct wil_probe_client_req *req, *t;
872
873 wil_dbg_misc(wil, "%s()\n", __func__);
874
875 mutex_lock(&wil->probe_client_mutex);
876
877 list_for_each_entry_safe(req, t, &wil->probe_client_pending, list) {
878 list_del(&req->list);
879 kfree(req);
880 }
881
882 mutex_unlock(&wil->probe_client_mutex);
883}
884
885static int wil_cfg80211_probe_client(struct wiphy *wiphy,
886 struct net_device *dev,
887 const u8 *peer, u64 *cookie)
888{
889 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
890 struct wil_probe_client_req *req;
891 int cid = wil_find_cid(wil, peer);
892
893 wil_dbg_misc(wil, "%s(%pM => CID %d)\n", __func__, peer, cid);
894
895 if (cid < 0)
896 return -ENOLINK;
897
898 req = kzalloc(sizeof(*req), GFP_KERNEL);
899 if (!req)
900 return -ENOMEM;
901
902 req->cid = cid;
903 req->cookie = cid;
904
905 mutex_lock(&wil->probe_client_mutex);
906 list_add_tail(&req->list, &wil->probe_client_pending);
907 mutex_unlock(&wil->probe_client_mutex);
908
909 *cookie = req->cookie;
910 queue_work(wil->wq_service, &wil->probe_client_worker);
911 return 0;
912}
913
807static struct cfg80211_ops wil_cfg80211_ops = { 914static struct cfg80211_ops wil_cfg80211_ops = {
808 .scan = wil_cfg80211_scan, 915 .scan = wil_cfg80211_scan,
809 .connect = wil_cfg80211_connect, 916 .connect = wil_cfg80211_connect,
@@ -823,6 +930,7 @@ static struct cfg80211_ops wil_cfg80211_ops = {
823 .start_ap = wil_cfg80211_start_ap, 930 .start_ap = wil_cfg80211_start_ap,
824 .stop_ap = wil_cfg80211_stop_ap, 931 .stop_ap = wil_cfg80211_stop_ap,
825 .del_station = wil_cfg80211_del_station, 932 .del_station = wil_cfg80211_del_station,
933 .probe_client = wil_cfg80211_probe_client,
826}; 934};
827 935
828static void wil_wiphy_init(struct wiphy *wiphy) 936static void wil_wiphy_init(struct wiphy *wiphy)
@@ -854,6 +962,7 @@ static void wil_wiphy_init(struct wiphy *wiphy)
854 wiphy->cipher_suites = wil_cipher_suites; 962 wiphy->cipher_suites = wil_cipher_suites;
855 wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites); 963 wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
856 wiphy->mgmt_stypes = wil_mgmt_stypes; 964 wiphy->mgmt_stypes = wil_mgmt_stypes;
965 wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
857} 966}
858 967
859struct wireless_dev *wil_cfg80211_init(struct device *dev) 968struct wireless_dev *wil_cfg80211_init(struct device *dev)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 4e6e14501c2f..45c3558ec804 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -50,6 +50,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
50 char _s, char _h) 50 char _s, char _h)
51{ 51{
52 void __iomem *x = wmi_addr(wil, vring->hwtail); 52 void __iomem *x = wmi_addr(wil, vring->hwtail);
53 u32 v;
53 54
54 seq_printf(s, "VRING %s = {\n", name); 55 seq_printf(s, "VRING %s = {\n", name);
55 seq_printf(s, " pa = %pad\n", &vring->pa); 56 seq_printf(s, " pa = %pad\n", &vring->pa);
@@ -58,10 +59,12 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
58 seq_printf(s, " swtail = %d\n", vring->swtail); 59 seq_printf(s, " swtail = %d\n", vring->swtail);
59 seq_printf(s, " swhead = %d\n", vring->swhead); 60 seq_printf(s, " swhead = %d\n", vring->swhead);
60 seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail); 61 seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail);
61 if (x) 62 if (x) {
62 seq_printf(s, "0x%08x\n", ioread32(x)); 63 v = ioread32(x);
63 else 64 seq_printf(s, "0x%08x = %d\n", v, v);
65 } else {
64 seq_puts(s, "???\n"); 66 seq_puts(s, "???\n");
67 }
65 68
66 if (vring->va && (vring->size < 1025)) { 69 if (vring->va && (vring->size < 1025)) {
67 uint i; 70 uint i;
@@ -101,8 +104,8 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
101 char name[10]; 104 char name[10];
102 /* performance monitoring */ 105 /* performance monitoring */
103 cycles_t now = get_cycles(); 106 cycles_t now = get_cycles();
104 cycles_t idle = txdata->idle * 100; 107 uint64_t idle = txdata->idle * 100;
105 cycles_t total = now - txdata->begin; 108 uint64_t total = now - txdata->begin;
106 109
107 do_div(idle, total); 110 do_div(idle, total);
108 txdata->begin = now; 111 txdata->begin = now;
@@ -110,9 +113,12 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
110 113
111 snprintf(name, sizeof(name), "tx_%2d", i); 114 snprintf(name, sizeof(name), "tx_%2d", i);
112 115
113 seq_printf(s, "\n%pM CID %d TID %d [%3d|%3d] idle %3d%%\n", 116 seq_printf(s,
114 wil->sta[cid].addr, cid, tid, used, avail, 117 "\n%pM CID %d TID %d BACK([%d] %d TU A%s) [%3d|%3d] idle %3d%%\n",
115 (int)idle); 118 wil->sta[cid].addr, cid, tid,
119 txdata->agg_wsize, txdata->agg_timeout,
120 txdata->agg_amsdu ? "+" : "-",
121 used, avail, (int)idle);
116 122
117 wil_print_vring(s, wil, name, vring, '_', 'H'); 123 wil_print_vring(s, wil, name, vring, '_', 'H');
118 } 124 }
@@ -384,24 +390,67 @@ static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
384 return 0; 390 return 0;
385} 391}
386 392
387static const struct dbg_off itr_cnt_off[] = { 393static const struct dbg_off lgc_itr_cnt_off[] = {
388 {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32}, 394 {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32},
389 {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32}, 395 {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32},
390 {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32}, 396 {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32},
391 {}, 397 {},
392}; 398};
393 399
400static const struct dbg_off tx_itr_cnt_off[] = {
401 {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH),
402 doff_io32},
403 {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA),
404 doff_io32},
405 {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL),
406 doff_io32},
407 {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH),
408 doff_io32},
409 {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA),
410 doff_io32},
411 {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL),
412 doff_io32},
413 {},
414};
415
416static const struct dbg_off rx_itr_cnt_off[] = {
417 {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH),
418 doff_io32},
419 {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA),
420 doff_io32},
421 {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL),
422 doff_io32},
423 {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH),
424 doff_io32},
425 {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA),
426 doff_io32},
427 {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL),
428 doff_io32},
429 {},
430};
431
394static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil, 432static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
395 struct dentry *parent) 433 struct dentry *parent)
396{ 434{
397 struct dentry *d = debugfs_create_dir("ITR_CNT", parent); 435 struct dentry *d, *dtx, *drx;
398 436
437 d = debugfs_create_dir("ITR_CNT", parent);
399 if (IS_ERR_OR_NULL(d)) 438 if (IS_ERR_OR_NULL(d))
400 return -ENODEV; 439 return -ENODEV;
401 440
441 dtx = debugfs_create_dir("TX", d);
442 drx = debugfs_create_dir("RX", d);
443 if (IS_ERR_OR_NULL(dtx) || IS_ERR_OR_NULL(drx))
444 return -ENODEV;
445
402 wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr, 446 wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr,
403 itr_cnt_off); 447 lgc_itr_cnt_off);
448
449 wil6210_debugfs_init_offset(wil, dtx, (void * __force)wil->csr,
450 tx_itr_cnt_off);
404 451
452 wil6210_debugfs_init_offset(wil, drx, (void * __force)wil->csr,
453 rx_itr_cnt_off);
405 return 0; 454 return 0;
406} 455}
407 456
@@ -558,6 +607,87 @@ static const struct file_operations fops_rxon = {
558 .open = simple_open, 607 .open = simple_open,
559}; 608};
560 609
610/* block ack control, write:
611 * - "add <ringid> <agg_size> <timeout>" to trigger ADDBA
612 * - "del_tx <ringid> <reason>" to trigger DELBA for Tx side
613 * - "del_rx <CID> <TID> <reason>" to trigger DELBA for Rx side
614 */
615static ssize_t wil_write_back(struct file *file, const char __user *buf,
616 size_t len, loff_t *ppos)
617{
618 struct wil6210_priv *wil = file->private_data;
619 int rc;
620 char *kbuf = kmalloc(len + 1, GFP_KERNEL);
621 char cmd[8];
622 int p1, p2, p3;
623
624 if (!kbuf)
625 return -ENOMEM;
626
627 rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
628 if (rc != len) {
629 kfree(kbuf);
630 return rc >= 0 ? -EIO : rc;
631 }
632
633 kbuf[len] = '\0';
634 rc = sscanf(kbuf, "%8s %d %d %d", cmd, &p1, &p2, &p3);
635 kfree(kbuf);
636
637 if (rc < 0)
638 return rc;
639 if (rc < 2)
640 return -EINVAL;
641
642 if (0 == strcmp(cmd, "add")) {
643 if (rc < 3) {
644 wil_err(wil, "BACK: add require at least 2 params\n");
645 return -EINVAL;
646 }
647 if (rc < 4)
648 p3 = 0;
649 wmi_addba(wil, p1, p2, p3);
650 } else if (0 == strcmp(cmd, "del_tx")) {
651 if (rc < 3)
652 p2 = WLAN_REASON_QSTA_LEAVE_QBSS;
653 wmi_delba_tx(wil, p1, p2);
654 } else if (0 == strcmp(cmd, "del_rx")) {
655 if (rc < 3) {
656 wil_err(wil,
657 "BACK: del_rx require at least 2 params\n");
658 return -EINVAL;
659 }
660 if (rc < 4)
661 p3 = WLAN_REASON_QSTA_LEAVE_QBSS;
662 wmi_delba_rx(wil, mk_cidxtid(p1, p2), p3);
663 } else {
664 wil_err(wil, "BACK: Unrecognized command \"%s\"\n", cmd);
665 return -EINVAL;
666 }
667
668 return len;
669}
670
671static ssize_t wil_read_back(struct file *file, char __user *user_buf,
672 size_t count, loff_t *ppos)
673{
674 static const char text[] = "block ack control, write:\n"
675 " - \"add <ringid> <agg_size> <timeout>\" to trigger ADDBA\n"
676 "If missing, <timeout> defaults to 0\n"
677 " - \"del_tx <ringid> <reason>\" to trigger DELBA for Tx side\n"
678 " - \"del_rx <CID> <TID> <reason>\" to trigger DELBA for Rx side\n"
679 "If missing, <reason> set to \"STA_LEAVING\" (36)\n";
680
681 return simple_read_from_buffer(user_buf, count, ppos, text,
682 sizeof(text));
683}
684
685static const struct file_operations fops_back = {
686 .read = wil_read_back,
687 .write = wil_write_back,
688 .open = simple_open,
689};
690
561/*---tx_mgmt---*/ 691/*---tx_mgmt---*/
562/* Write mgmt frame to this file to send it */ 692/* Write mgmt frame to this file to send it */
563static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, 693static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
@@ -1116,7 +1246,8 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
1116 int i; 1246 int i;
1117 u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size; 1247 u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
1118 1248
1119 seq_printf(s, "0x%03x [", r->head_seq_num); 1249 seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout,
1250 r->head_seq_num);
1120 for (i = 0; i < r->buf_size; i++) { 1251 for (i = 0; i < r->buf_size; i++) {
1121 if (i == index) 1252 if (i == index)
1122 seq_printf(s, "%c", r->reorder_buf[i] ? 'O' : '|'); 1253 seq_printf(s, "%c", r->reorder_buf[i] ? 'O' : '|');
@@ -1127,10 +1258,10 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
1127} 1258}
1128 1259
1129static int wil_sta_debugfs_show(struct seq_file *s, void *data) 1260static int wil_sta_debugfs_show(struct seq_file *s, void *data)
1261__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
1130{ 1262{
1131 struct wil6210_priv *wil = s->private; 1263 struct wil6210_priv *wil = s->private;
1132 int i, tid; 1264 int i, tid;
1133 unsigned long flags;
1134 1265
1135 for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { 1266 for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
1136 struct wil_sta_info *p = &wil->sta[i]; 1267 struct wil_sta_info *p = &wil->sta[i];
@@ -1151,7 +1282,7 @@ static int wil_sta_debugfs_show(struct seq_file *s, void *data)
1151 (p->data_port_open ? " data_port_open" : "")); 1282 (p->data_port_open ? " data_port_open" : ""));
1152 1283
1153 if (p->status == wil_sta_connected) { 1284 if (p->status == wil_sta_connected) {
1154 spin_lock_irqsave(&p->tid_rx_lock, flags); 1285 spin_lock_bh(&p->tid_rx_lock);
1155 for (tid = 0; tid < WIL_STA_TID_NUM; tid++) { 1286 for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
1156 struct wil_tid_ampdu_rx *r = p->tid_rx[tid]; 1287 struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
1157 1288
@@ -1160,7 +1291,7 @@ static int wil_sta_debugfs_show(struct seq_file *s, void *data)
1160 wil_print_rxtid(s, r); 1291 wil_print_rxtid(s, r);
1161 } 1292 }
1162 } 1293 }
1163 spin_unlock_irqrestore(&p->tid_rx_lock, flags); 1294 spin_unlock_bh(&p->tid_rx_lock);
1164 } 1295 }
1165 } 1296 }
1166 1297
@@ -1217,6 +1348,7 @@ static const struct {
1217 {"rxon", S_IWUSR, &fops_rxon}, 1348 {"rxon", S_IWUSR, &fops_rxon},
1218 {"tx_mgmt", S_IWUSR, &fops_txmgmt}, 1349 {"tx_mgmt", S_IWUSR, &fops_txmgmt},
1219 {"wmi_send", S_IWUSR, &fops_wmi}, 1350 {"wmi_send", S_IWUSR, &fops_wmi},
1351 {"back", S_IRUGO | S_IWUSR, &fops_back},
1220 {"temp", S_IRUGO, &fops_temp}, 1352 {"temp", S_IRUGO, &fops_temp},
1221 {"freq", S_IRUGO, &fops_freq}, 1353 {"freq", S_IRUGO, &fops_freq},
1222 {"link", S_IRUGO, &fops_link}, 1354 {"link", S_IRUGO, &fops_link},
@@ -1261,7 +1393,7 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
1261/* fields in struct wil6210_priv */ 1393/* fields in struct wil6210_priv */
1262static const struct dbg_off dbg_wil_off[] = { 1394static const struct dbg_off dbg_wil_off[] = {
1263 WIL_FIELD(secure_pcp, S_IRUGO | S_IWUSR, doff_u32), 1395 WIL_FIELD(secure_pcp, S_IRUGO | S_IWUSR, doff_u32),
1264 WIL_FIELD(status, S_IRUGO | S_IWUSR, doff_ulong), 1396 WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong),
1265 WIL_FIELD(fw_version, S_IRUGO, doff_u32), 1397 WIL_FIELD(fw_version, S_IRUGO, doff_u32),
1266 WIL_FIELD(hw_version, S_IRUGO, doff_x32), 1398 WIL_FIELD(hw_version, S_IRUGO, doff_x32),
1267 WIL_FIELD(recovery_count, S_IRUGO, doff_u32), 1399 WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index d686638972be..4c44a82c34d7 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -45,16 +45,35 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
45 struct ethtool_coalesce *cp) 45 struct ethtool_coalesce *cp)
46{ 46{
47 struct wil6210_priv *wil = ndev_to_wil(ndev); 47 struct wil6210_priv *wil = ndev_to_wil(ndev);
48 u32 itr_en, itr_val = 0; 48 u32 tx_itr_en, tx_itr_val = 0;
49 u32 rx_itr_en, rx_itr_val = 0;
49 50
50 wil_dbg_misc(wil, "%s()\n", __func__); 51 wil_dbg_misc(wil, "%s()\n", __func__);
51 52
52 itr_en = ioread32(wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL)); 53 if (test_bit(hw_capability_advanced_itr_moderation,
53 if (itr_en & BIT_DMA_ITR_CNT_CRL_EN) 54 wil->hw_capabilities)) {
54 itr_val = ioread32(wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_TRSH)); 55 tx_itr_en = ioread32(wil->csr +
55 56 HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
56 cp->rx_coalesce_usecs = itr_val; 57 if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
58 tx_itr_val =
59 ioread32(wil->csr +
60 HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
61
62 rx_itr_en = ioread32(wil->csr +
63 HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
64 if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
65 rx_itr_val =
66 ioread32(wil->csr +
67 HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
68 } else {
69 rx_itr_en = ioread32(wil->csr + HOSTADDR(RGF_DMA_ITR_CNT_CRL));
70 if (rx_itr_en & BIT_DMA_ITR_CNT_CRL_EN)
71 rx_itr_val = ioread32(wil->csr +
72 HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
73 }
57 74
75 cp->tx_coalesce_usecs = tx_itr_val;
76 cp->rx_coalesce_usecs = rx_itr_val;
58 return 0; 77 return 0;
59} 78}
60 79
@@ -63,22 +82,25 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
63{ 82{
64 struct wil6210_priv *wil = ndev_to_wil(ndev); 83 struct wil6210_priv *wil = ndev_to_wil(ndev);
65 84
66 wil_dbg_misc(wil, "%s(%d usec)\n", __func__, cp->rx_coalesce_usecs); 85 wil_dbg_misc(wil, "%s(rx %d usec, tx %d usec)\n", __func__,
86 cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
67 87
68 if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { 88 if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
69 wil_dbg_misc(wil, "No IRQ coalescing in monitor mode\n"); 89 wil_dbg_misc(wil, "No IRQ coalescing in monitor mode\n");
70 return -EINVAL; 90 return -EINVAL;
71 } 91 }
72 92
73 /* only @rx_coalesce_usecs supported, ignore 93 /* only @rx_coalesce_usecs and @tx_coalesce_usecs supported,
74 * other parameters 94 * ignore other parameters
75 */ 95 */
76 96
77 if (cp->rx_coalesce_usecs > WIL6210_ITR_TRSH_MAX) 97 if (cp->rx_coalesce_usecs > WIL6210_ITR_TRSH_MAX ||
98 cp->tx_coalesce_usecs > WIL6210_ITR_TRSH_MAX)
78 goto out_bad; 99 goto out_bad;
79 100
80 wil->itr_trsh = cp->rx_coalesce_usecs; 101 wil->tx_max_burst_duration = cp->tx_coalesce_usecs;
81 wil_set_itr_trsh(wil); 102 wil->rx_max_burst_duration = cp->rx_coalesce_usecs;
103 wil_configure_interrupt_moderation(wil);
82 104
83 return 0; 105 return 0;
84 106
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 4bcbd6297b3e..a6f923086f31 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -102,7 +102,7 @@ static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
102 iowrite32(WIL6210_IRQ_DISABLE, wil->csr + 102 iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
103 HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW)); 103 HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
104 104
105 clear_bit(wil_status_irqen, &wil->status); 105 clear_bit(wil_status_irqen, wil->status);
106} 106}
107 107
108void wil6210_unmask_irq_tx(struct wil6210_priv *wil) 108void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
@@ -130,7 +130,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
130{ 130{
131 wil_dbg_irq(wil, "%s()\n", __func__); 131 wil_dbg_irq(wil, "%s()\n", __func__);
132 132
133 set_bit(wil_status_irqen, &wil->status); 133 set_bit(wil_status_irqen, wil->status);
134 134
135 iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr + 135 iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
136 HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW)); 136 HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
@@ -157,15 +157,91 @@ void wil_unmask_irq(struct wil6210_priv *wil)
157 iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + 157 iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
158 offsetof(struct RGF_ICR, ICC)); 158 offsetof(struct RGF_ICR, ICC));
159 159
160 /* interrupt moderation parameters */
161 wil_set_itr_trsh(wil);
162
163 wil6210_unmask_irq_pseudo(wil); 160 wil6210_unmask_irq_pseudo(wil);
164 wil6210_unmask_irq_tx(wil); 161 wil6210_unmask_irq_tx(wil);
165 wil6210_unmask_irq_rx(wil); 162 wil6210_unmask_irq_rx(wil);
166 wil6210_unmask_irq_misc(wil); 163 wil6210_unmask_irq_misc(wil);
167} 164}
168 165
166/* target write operation */
167#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
168
169static
170void wil_configure_interrupt_moderation_new(struct wil6210_priv *wil)
171{
172 /* Disable and clear tx counter before (re)configuration */
173 W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
174 W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
175 wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n",
176 wil->tx_max_burst_duration);
177 /* Configure TX max burst duration timer to use usec units */
178 W(RGF_DMA_ITR_TX_CNT_CTL,
179 BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
180
181 /* Disable and clear tx idle counter before (re)configuration */
182 W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
183 W(RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
184 wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n",
185 wil->tx_interframe_timeout);
186 /* Configure TX max burst duration timer to use usec units */
187 W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
188 BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
189
190 /* Disable and clear rx counter before (re)configuration */
191 W(RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
192 W(RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
193 wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n",
194 wil->rx_max_burst_duration);
195 /* Configure TX max burst duration timer to use usec units */
196 W(RGF_DMA_ITR_RX_CNT_CTL,
197 BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
198
199 /* Disable and clear rx idle counter before (re)configuration */
200 W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
201 W(RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
202 wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n",
203 wil->rx_interframe_timeout);
204 /* Configure TX max burst duration timer to use usec units */
205 W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
206 BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
207}
208
209static
210void wil_configure_interrupt_moderation_lgc(struct wil6210_priv *wil)
211{
212 /* disable, use usec resolution */
213 W(RGF_DMA_ITR_CNT_CRL, BIT_DMA_ITR_CNT_CRL_CLR);
214
215 wil_info(wil, "set ITR_TRSH = %d usec\n", wil->rx_max_burst_duration);
216 W(RGF_DMA_ITR_CNT_TRSH, wil->rx_max_burst_duration);
217 /* start it */
218 W(RGF_DMA_ITR_CNT_CRL,
219 BIT_DMA_ITR_CNT_CRL_EN | BIT_DMA_ITR_CNT_CRL_EXT_TICK);
220}
221
222#undef W
223
224void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
225{
226 wil_dbg_irq(wil, "%s()\n", __func__);
227
228 /* disable interrupt moderation for monitor
229 * to get better timestamp precision
230 */
231 if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR)
232 return;
233
234 if (test_bit(hw_capability_advanced_itr_moderation,
235 wil->hw_capabilities))
236 wil_configure_interrupt_moderation_new(wil);
237 else {
238 /* Advanced interrupt moderation is not available before
239 * Sparrow v2. Will use legacy interrupt moderation
240 */
241 wil_configure_interrupt_moderation_lgc(wil);
242 }
243}
244
169static irqreturn_t wil6210_irq_rx(int irq, void *cookie) 245static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
170{ 246{
171 struct wil6210_priv *wil = cookie; 247 struct wil6210_priv *wil = cookie;
@@ -194,18 +270,19 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
194 wil_dbg_irq(wil, "RX done\n"); 270 wil_dbg_irq(wil, "RX done\n");
195 271
196 if (isr & BIT_DMA_EP_RX_ICR_RX_HTRSH) 272 if (isr & BIT_DMA_EP_RX_ICR_RX_HTRSH)
197 wil_err_ratelimited(wil, "Received \"Rx buffer is in risk " 273 wil_err_ratelimited(wil,
198 "of overflow\" interrupt\n"); 274 "Received \"Rx buffer is in risk of overflow\" interrupt\n");
199 275
200 isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH); 276 isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
201 if (test_bit(wil_status_reset_done, &wil->status)) { 277 BIT_DMA_EP_RX_ICR_RX_HTRSH);
202 if (test_bit(wil_status_napi_en, &wil->status)) { 278 if (test_bit(wil_status_reset_done, wil->status)) {
279 if (test_bit(wil_status_napi_en, wil->status)) {
203 wil_dbg_txrx(wil, "NAPI(Rx) schedule\n"); 280 wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
204 need_unmask = false; 281 need_unmask = false;
205 napi_schedule(&wil->napi_rx); 282 napi_schedule(&wil->napi_rx);
206 } else { 283 } else {
207 wil_err(wil, "Got Rx interrupt while " 284 wil_err(wil,
208 "stopping interface\n"); 285 "Got Rx interrupt while stopping interface\n");
209 } 286 }
210 } else { 287 } else {
211 wil_err(wil, "Got Rx interrupt while in reset\n"); 288 wil_err(wil, "Got Rx interrupt while in reset\n");
@@ -248,7 +325,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
248 isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE; 325 isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
249 /* clear also all VRING interrupts */ 326 /* clear also all VRING interrupts */
250 isr &= ~(BIT(25) - 1UL); 327 isr &= ~(BIT(25) - 1UL);
251 if (test_bit(wil_status_reset_done, &wil->status)) { 328 if (test_bit(wil_status_reset_done, wil->status)) {
252 wil_dbg_txrx(wil, "NAPI(Tx) schedule\n"); 329 wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
253 need_unmask = false; 330 need_unmask = false;
254 napi_schedule(&wil->napi_tx); 331 napi_schedule(&wil->napi_tx);
@@ -310,7 +387,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
310 387
311 if (isr & ISR_MISC_FW_ERROR) { 388 if (isr & ISR_MISC_FW_ERROR) {
312 wil_err(wil, "Firmware error detected\n"); 389 wil_err(wil, "Firmware error detected\n");
313 clear_bit(wil_status_fwready, &wil->status); 390 clear_bit(wil_status_fwready, wil->status);
314 /* 391 /*
315 * do not clear @isr here - we do 2-nd part in thread 392 * do not clear @isr here - we do 2-nd part in thread
316 * there, user space get notified, and it should be done 393 * there, user space get notified, and it should be done
@@ -321,7 +398,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
321 if (isr & ISR_MISC_FW_READY) { 398 if (isr & ISR_MISC_FW_READY) {
322 wil_dbg_irq(wil, "IRQ: FW ready\n"); 399 wil_dbg_irq(wil, "IRQ: FW ready\n");
323 wil_cache_mbox_regs(wil); 400 wil_cache_mbox_regs(wil);
324 set_bit(wil_status_reset_done, &wil->status); 401 set_bit(wil_status_reset_done, wil->status);
325 /** 402 /**
326 * Actual FW ready indicated by the 403 * Actual FW ready indicated by the
327 * WMI_FW_READY_EVENTID 404 * WMI_FW_READY_EVENTID
@@ -394,7 +471,7 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
394 */ 471 */
395static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause) 472static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
396{ 473{
397 if (!test_bit(wil_status_irqen, &wil->status)) { 474 if (!test_bit(wil_status_irqen, wil->status)) {
398 u32 icm_rx = wil_ioread32_and_clear(wil->csr + 475 u32 icm_rx = wil_ioread32_and_clear(wil->csr +
399 HOSTADDR(RGF_DMA_EP_RX_ICR) + 476 HOSTADDR(RGF_DMA_EP_RX_ICR) +
400 offsetof(struct RGF_ICR, ICM)); 477 offsetof(struct RGF_ICR, ICM));
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 8ff3fe34fe05..b04e0afdcb21 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. 2 * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -33,15 +33,18 @@ static bool no_fw_load = true;
33module_param(no_fw_load, bool, S_IRUGO | S_IWUSR); 33module_param(no_fw_load, bool, S_IRUGO | S_IWUSR);
34MODULE_PARM_DESC(no_fw_load, " do not download FW, use one in on-card flash."); 34MODULE_PARM_DESC(no_fw_load, " do not download FW, use one in on-card flash.");
35 35
36static unsigned int itr_trsh = WIL6210_ITR_TRSH_DEFAULT; 36/* if not set via modparam, will be set to default value of 1/8 of
37 37 * rx ring size during init flow
38module_param(itr_trsh, uint, S_IRUGO); 38 */
39MODULE_PARM_DESC(itr_trsh, " Interrupt moderation threshold, usecs."); 39unsigned short rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_INIT;
40module_param(rx_ring_overflow_thrsh, ushort, S_IRUGO);
41MODULE_PARM_DESC(rx_ring_overflow_thrsh,
42 " RX ring overflow threshold in descriptors.");
40 43
41/* We allow allocation of more than 1 page buffers to support large packets. 44/* We allow allocation of more than 1 page buffers to support large packets.
42 * It is suboptimal behavior performance wise in case MTU above page size. 45 * It is suboptimal behavior performance wise in case MTU above page size.
43 */ 46 */
44unsigned int mtu_max = TXRX_BUF_LEN_DEFAULT - ETH_HLEN; 47unsigned int mtu_max = TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
45static int mtu_max_set(const char *val, const struct kernel_param *kp) 48static int mtu_max_set(const char *val, const struct kernel_param *kp)
46{ 49{
47 int ret; 50 int ret;
@@ -53,7 +56,7 @@ static int mtu_max_set(const char *val, const struct kernel_param *kp)
53 if (ret) 56 if (ret)
54 return ret; 57 return ret;
55 58
56 if (mtu_max < 68 || mtu_max > IEEE80211_MAX_DATA_LEN_DMG) 59 if (mtu_max < 68 || mtu_max > WIL_MAX_ETH_MTU)
57 ret = -EINVAL; 60 ret = -EINVAL;
58 61
59 return ret; 62 return ret;
@@ -135,12 +138,14 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
135 138
136static void wil_disconnect_cid(struct wil6210_priv *wil, int cid, 139static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
137 u16 reason_code, bool from_event) 140 u16 reason_code, bool from_event)
141__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
138{ 142{
139 uint i; 143 uint i;
140 struct net_device *ndev = wil_to_ndev(wil); 144 struct net_device *ndev = wil_to_ndev(wil);
141 struct wireless_dev *wdev = wil->wdev; 145 struct wireless_dev *wdev = wil->wdev;
142 struct wil_sta_info *sta = &wil->sta[cid]; 146 struct wil_sta_info *sta = &wil->sta[cid];
143 147
148 might_sleep();
144 wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid, 149 wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
145 sta->status); 150 sta->status);
146 151
@@ -163,15 +168,14 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
163 168
164 for (i = 0; i < WIL_STA_TID_NUM; i++) { 169 for (i = 0; i < WIL_STA_TID_NUM; i++) {
165 struct wil_tid_ampdu_rx *r; 170 struct wil_tid_ampdu_rx *r;
166 unsigned long flags;
167 171
168 spin_lock_irqsave(&sta->tid_rx_lock, flags); 172 spin_lock_bh(&sta->tid_rx_lock);
169 173
170 r = sta->tid_rx[i]; 174 r = sta->tid_rx[i];
171 sta->tid_rx[i] = NULL; 175 sta->tid_rx[i] = NULL;
172 wil_tid_ampdu_rx_free(wil, r); 176 wil_tid_ampdu_rx_free(wil, r);
173 177
174 spin_unlock_irqrestore(&sta->tid_rx_lock, flags); 178 spin_unlock_bh(&sta->tid_rx_lock);
175 } 179 }
176 for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { 180 for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
177 if (wil->vring2cid_tid[i][0] == cid) 181 if (wil->vring2cid_tid[i][0] == cid)
@@ -188,34 +192,47 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
188 struct wireless_dev *wdev = wil->wdev; 192 struct wireless_dev *wdev = wil->wdev;
189 193
190 might_sleep(); 194 might_sleep();
191 if (bssid) { 195 wil_dbg_misc(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
196 reason_code, from_event ? "+" : "-");
197
198 /* Cases are:
199 * - disconnect single STA, still connected
200 * - disconnect single STA, already disconnected
201 * - disconnect all
202 *
203 * For "disconnect all", there are 2 options:
204 * - bssid == NULL
205 * - bssid is our MAC address
206 */
207 if (bssid && memcmp(ndev->dev_addr, bssid, ETH_ALEN)) {
192 cid = wil_find_cid(wil, bssid); 208 cid = wil_find_cid(wil, bssid);
193 wil_dbg_misc(wil, "%s(%pM, CID %d)\n", __func__, bssid, cid); 209 wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n",
194 } else { 210 bssid, cid, reason_code);
195 wil_dbg_misc(wil, "%s(all)\n", __func__); 211 if (cid >= 0) /* disconnect 1 peer */
196 } 212 wil_disconnect_cid(wil, cid, reason_code, from_event);
197 213 } else { /* all */
198 if (cid >= 0) /* disconnect 1 peer */ 214 wil_dbg_misc(wil, "Disconnect all\n");
199 wil_disconnect_cid(wil, cid, reason_code, from_event);
200 else /* disconnect all */
201 for (cid = 0; cid < WIL6210_MAX_CID; cid++) 215 for (cid = 0; cid < WIL6210_MAX_CID; cid++)
202 wil_disconnect_cid(wil, cid, reason_code, from_event); 216 wil_disconnect_cid(wil, cid, reason_code, from_event);
217 }
203 218
204 /* link state */ 219 /* link state */
205 switch (wdev->iftype) { 220 switch (wdev->iftype) {
206 case NL80211_IFTYPE_STATION: 221 case NL80211_IFTYPE_STATION:
207 case NL80211_IFTYPE_P2P_CLIENT: 222 case NL80211_IFTYPE_P2P_CLIENT:
208 wil_link_off(wil); 223 netif_tx_stop_all_queues(ndev);
209 if (test_bit(wil_status_fwconnected, &wil->status)) { 224 netif_carrier_off(ndev);
210 clear_bit(wil_status_fwconnected, &wil->status); 225
226 if (test_bit(wil_status_fwconnected, wil->status)) {
227 clear_bit(wil_status_fwconnected, wil->status);
211 cfg80211_disconnected(ndev, reason_code, 228 cfg80211_disconnected(ndev, reason_code,
212 NULL, 0, GFP_KERNEL); 229 NULL, 0, GFP_KERNEL);
213 } else if (test_bit(wil_status_fwconnecting, &wil->status)) { 230 } else if (test_bit(wil_status_fwconnecting, wil->status)) {
214 cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0, 231 cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
215 WLAN_STATUS_UNSPECIFIED_FAILURE, 232 WLAN_STATUS_UNSPECIFIED_FAILURE,
216 GFP_KERNEL); 233 GFP_KERNEL);
217 } 234 }
218 clear_bit(wil_status_fwconnecting, &wil->status); 235 clear_bit(wil_status_fwconnecting, wil->status);
219 break; 236 break;
220 default: 237 default:
221 break; 238 break;
@@ -248,7 +265,7 @@ static void wil_scan_timer_fn(ulong x)
248{ 265{
249 struct wil6210_priv *wil = (void *)x; 266 struct wil6210_priv *wil = (void *)x;
250 267
251 clear_bit(wil_status_fwready, &wil->status); 268 clear_bit(wil_status_fwready, wil->status);
252 wil_err(wil, "Scan timeout detected, start fw error recovery\n"); 269 wil_err(wil, "Scan timeout detected, start fw error recovery\n");
253 wil->recovery_state = fw_recovery_pending; 270 wil->recovery_state = fw_recovery_pending;
254 schedule_work(&wil->fw_error_worker); 271 schedule_work(&wil->fw_error_worker);
@@ -352,6 +369,8 @@ static void wil_connect_worker(struct work_struct *work)
352 int rc; 369 int rc;
353 struct wil6210_priv *wil = container_of(work, struct wil6210_priv, 370 struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
354 connect_worker); 371 connect_worker);
372 struct net_device *ndev = wil_to_ndev(wil);
373
355 int cid = wil->pending_connect_cid; 374 int cid = wil->pending_connect_cid;
356 int ringid = wil_find_free_vring(wil); 375 int ringid = wil_find_free_vring(wil);
357 376
@@ -366,7 +385,7 @@ static void wil_connect_worker(struct work_struct *work)
366 wil->pending_connect_cid = -1; 385 wil->pending_connect_cid = -1;
367 if (rc == 0) { 386 if (rc == 0) {
368 wil->sta[cid].status = wil_sta_connected; 387 wil->sta[cid].status = wil_sta_connected;
369 wil_link_on(wil); 388 netif_tx_wake_all_queues(ndev);
370 } else { 389 } else {
371 wil->sta[cid].status = wil_sta_unused; 390 wil->sta[cid].status = wil_sta_unused;
372 } 391 }
@@ -384,6 +403,9 @@ int wil_priv_init(struct wil6210_priv *wil)
384 403
385 mutex_init(&wil->mutex); 404 mutex_init(&wil->mutex);
386 mutex_init(&wil->wmi_mutex); 405 mutex_init(&wil->wmi_mutex);
406 mutex_init(&wil->back_rx_mutex);
407 mutex_init(&wil->back_tx_mutex);
408 mutex_init(&wil->probe_client_mutex);
387 409
388 init_completion(&wil->wmi_ready); 410 init_completion(&wil->wmi_ready);
389 init_completion(&wil->wmi_call); 411 init_completion(&wil->wmi_call);
@@ -396,25 +418,39 @@ int wil_priv_init(struct wil6210_priv *wil)
396 INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker); 418 INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
397 INIT_WORK(&wil->wmi_event_worker, wmi_event_worker); 419 INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
398 INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker); 420 INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
421 INIT_WORK(&wil->back_rx_worker, wil_back_rx_worker);
422 INIT_WORK(&wil->back_tx_worker, wil_back_tx_worker);
423 INIT_WORK(&wil->probe_client_worker, wil_probe_client_worker);
399 424
400 INIT_LIST_HEAD(&wil->pending_wmi_ev); 425 INIT_LIST_HEAD(&wil->pending_wmi_ev);
426 INIT_LIST_HEAD(&wil->back_rx_pending);
427 INIT_LIST_HEAD(&wil->back_tx_pending);
428 INIT_LIST_HEAD(&wil->probe_client_pending);
401 spin_lock_init(&wil->wmi_ev_lock); 429 spin_lock_init(&wil->wmi_ev_lock);
402 init_waitqueue_head(&wil->wq); 430 init_waitqueue_head(&wil->wq);
403 431
404 wil->wmi_wq = create_singlethread_workqueue(WIL_NAME"_wmi"); 432 wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi");
405 if (!wil->wmi_wq) 433 if (!wil->wmi_wq)
406 return -EAGAIN; 434 return -EAGAIN;
407 435
408 wil->wmi_wq_conn = create_singlethread_workqueue(WIL_NAME"_connect"); 436 wil->wq_service = create_singlethread_workqueue(WIL_NAME "_service");
409 if (!wil->wmi_wq_conn) { 437 if (!wil->wq_service)
410 destroy_workqueue(wil->wmi_wq); 438 goto out_wmi_wq;
411 return -EAGAIN;
412 }
413 439
414 wil->last_fw_recovery = jiffies; 440 wil->last_fw_recovery = jiffies;
415 wil->itr_trsh = itr_trsh; 441 wil->tx_interframe_timeout = WIL6210_ITR_TX_INTERFRAME_TIMEOUT_DEFAULT;
442 wil->rx_interframe_timeout = WIL6210_ITR_RX_INTERFRAME_TIMEOUT_DEFAULT;
443 wil->tx_max_burst_duration = WIL6210_ITR_TX_MAX_BURST_DURATION_DEFAULT;
444 wil->rx_max_burst_duration = WIL6210_ITR_RX_MAX_BURST_DURATION_DEFAULT;
416 445
446 if (rx_ring_overflow_thrsh == WIL6210_RX_HIGH_TRSH_INIT)
447 rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_DEFAULT;
417 return 0; 448 return 0;
449
450out_wmi_wq:
451 destroy_workqueue(wil->wmi_wq);
452
453 return -EAGAIN;
418} 454}
419 455
420/** 456/**
@@ -448,7 +484,13 @@ void wil_priv_deinit(struct wil6210_priv *wil)
448 wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); 484 wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
449 mutex_unlock(&wil->mutex); 485 mutex_unlock(&wil->mutex);
450 wmi_event_flush(wil); 486 wmi_event_flush(wil);
451 destroy_workqueue(wil->wmi_wq_conn); 487 wil_back_rx_flush(wil);
488 cancel_work_sync(&wil->back_rx_worker);
489 wil_back_tx_flush(wil);
490 cancel_work_sync(&wil->back_tx_worker);
491 wil_probe_client_flush(wil);
492 cancel_work_sync(&wil->probe_client_worker);
493 destroy_workqueue(wil->wq_service);
452 destroy_workqueue(wil->wmi_wq); 494 destroy_workqueue(wil->wmi_wq);
453} 495}
454 496
@@ -478,13 +520,10 @@ static int wil_target_reset(struct wil6210_priv *wil)
478{ 520{
479 int delay = 0; 521 int delay = 0;
480 u32 x; 522 u32 x;
481 u32 rev_id; 523 bool is_reset_v2 = test_bit(hw_capability_reset_v2,
482 bool is_sparrow = (wil->board->board == WIL_BOARD_SPARROW); 524 wil->hw_capabilities);
483 525
484 wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->board->name); 526 wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
485
486 wil->hw_version = R(RGF_USER_FW_REV_ID);
487 rev_id = wil->hw_version & 0xff;
488 527
489 /* Clear MAC link up */ 528 /* Clear MAC link up */
490 S(RGF_HP_CTRL, BIT(15)); 529 S(RGF_HP_CTRL, BIT(15));
@@ -496,7 +535,7 @@ static int wil_target_reset(struct wil6210_priv *wil)
496 /* Clear Fw Download notification */ 535 /* Clear Fw Download notification */
497 C(RGF_USER_USAGE_6, BIT(0)); 536 C(RGF_USER_USAGE_6, BIT(0));
498 537
499 if (is_sparrow) { 538 if (is_reset_v2) {
500 S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN); 539 S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
501 /* XTAL stabilization should take about 3ms */ 540 /* XTAL stabilization should take about 3ms */
502 usleep_range(5000, 7000); 541 usleep_range(5000, 7000);
@@ -517,10 +556,11 @@ static int wil_target_reset(struct wil6210_priv *wil)
517 556
518 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); 557 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
519 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F); 558 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
520 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, is_sparrow ? 0x000000f0 : 0x00000170); 559 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3,
560 is_reset_v2 ? 0x000000f0 : 0x00000170);
521 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00); 561 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
522 562
523 if (is_sparrow) { 563 if (is_reset_v2) {
524 W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0); 564 W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
525 W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0); 565 W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
526 } 566 }
@@ -530,19 +570,14 @@ static int wil_target_reset(struct wil6210_priv *wil)
530 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0); 570 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
531 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); 571 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
532 572
533 if (is_sparrow) { 573 if (is_reset_v2) {
534 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003); 574 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
535 /* reset A2 PCIE AHB */ 575 /* reset A2 PCIE AHB */
536 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); 576 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
537 } else { 577 } else {
538 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001); 578 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
539 if (rev_id == 1) { 579 W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
540 /* reset A1 BOTH PCIE AHB & PCIE RGF */ 580 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
541 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
542 } else {
543 W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
544 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
545 }
546 } 581 }
547 582
548 /* TODO: check order here!!! Erez code is different */ 583 /* TODO: check order here!!! Erez code is different */
@@ -559,8 +594,7 @@ static int wil_target_reset(struct wil6210_priv *wil)
559 } 594 }
560 } while (x != HW_MACHINE_BOOT_DONE); 595 } while (x != HW_MACHINE_BOOT_DONE);
561 596
562 /* TODO: Erez check rev_id != 1 */ 597 if (!is_reset_v2)
563 if (!is_sparrow && (rev_id != 1))
564 W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8)); 598 W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8));
565 599
566 C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); 600 C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
@@ -569,26 +603,6 @@ static int wil_target_reset(struct wil6210_priv *wil)
569 return 0; 603 return 0;
570} 604}
571 605
572/**
573 * wil_set_itr_trsh: - apply interrupt coalescing params
574 */
575void wil_set_itr_trsh(struct wil6210_priv *wil)
576{
577 /* disable, use usec resolution */
578 W(RGF_DMA_ITR_CNT_CRL, BIT_DMA_ITR_CNT_CRL_EXT_TICK);
579
580 /* disable interrupt moderation for monitor
581 * to get better timestamp precision
582 */
583 if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR)
584 return;
585
586 wil_info(wil, "set ITR_TRSH = %d usec\n", wil->itr_trsh);
587 W(RGF_DMA_ITR_CNT_TRSH, wil->itr_trsh);
588 W(RGF_DMA_ITR_CNT_CRL, BIT_DMA_ITR_CNT_CRL_EN |
589 BIT_DMA_ITR_CNT_CRL_EXT_TICK); /* start it */
590}
591
592#undef R 606#undef R
593#undef W 607#undef W
594#undef S 608#undef S
@@ -629,13 +643,17 @@ int wil_reset(struct wil6210_priv *wil)
629 643
630 wil_dbg_misc(wil, "%s()\n", __func__); 644 wil_dbg_misc(wil, "%s()\n", __func__);
631 645
646 if (wil->hw_version == HW_VER_UNKNOWN)
647 return -ENODEV;
648
632 WARN_ON(!mutex_is_locked(&wil->mutex)); 649 WARN_ON(!mutex_is_locked(&wil->mutex));
633 WARN_ON(test_bit(wil_status_napi_en, &wil->status)); 650 WARN_ON(test_bit(wil_status_napi_en, wil->status));
634 651
635 cancel_work_sync(&wil->disconnect_worker); 652 cancel_work_sync(&wil->disconnect_worker);
636 wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); 653 wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
637 654
638 wil->status = 0; /* prevent NAPI from being scheduled */ 655 /* prevent NAPI from being scheduled */
656 bitmap_zero(wil->status, wil_status_last);
639 657
640 if (wil->scan_request) { 658 if (wil->scan_request) {
641 wil_dbg_misc(wil, "Abort scan_request 0x%p\n", 659 wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
@@ -649,7 +667,7 @@ int wil_reset(struct wil6210_priv *wil)
649 667
650 wmi_event_flush(wil); 668 wmi_event_flush(wil);
651 669
652 flush_workqueue(wil->wmi_wq_conn); 670 flush_workqueue(wil->wq_service);
653 flush_workqueue(wil->wmi_wq); 671 flush_workqueue(wil->wmi_wq);
654 672
655 rc = wil_target_reset(wil); 673 rc = wil_target_reset(wil);
@@ -688,6 +706,7 @@ int wil_reset(struct wil6210_priv *wil)
688 reinit_completion(&wil->wmi_ready); 706 reinit_completion(&wil->wmi_ready);
689 reinit_completion(&wil->wmi_call); 707 reinit_completion(&wil->wmi_call);
690 708
709 wil_configure_interrupt_moderation(wil);
691 wil_unmask_irq(wil); 710 wil_unmask_irq(wil);
692 711
693 /* we just started MAC, wait for FW ready */ 712 /* we just started MAC, wait for FW ready */
@@ -703,28 +722,6 @@ void wil_fw_error_recovery(struct wil6210_priv *wil)
703 schedule_work(&wil->fw_error_worker); 722 schedule_work(&wil->fw_error_worker);
704} 723}
705 724
706void wil_link_on(struct wil6210_priv *wil)
707{
708 struct net_device *ndev = wil_to_ndev(wil);
709
710 wil_dbg_misc(wil, "%s()\n", __func__);
711
712 netif_carrier_on(ndev);
713 wil_dbg_misc(wil, "netif_tx_wake : link on\n");
714 netif_tx_wake_all_queues(ndev);
715}
716
717void wil_link_off(struct wil6210_priv *wil)
718{
719 struct net_device *ndev = wil_to_ndev(wil);
720
721 wil_dbg_misc(wil, "%s()\n", __func__);
722
723 netif_tx_stop_all_queues(ndev);
724 wil_dbg_misc(wil, "netif_tx_stop : link off\n");
725 netif_carrier_off(ndev);
726}
727
728int __wil_up(struct wil6210_priv *wil) 725int __wil_up(struct wil6210_priv *wil)
729{ 726{
730 struct net_device *ndev = wil_to_ndev(wil); 727 struct net_device *ndev = wil_to_ndev(wil);
@@ -774,7 +771,7 @@ int __wil_up(struct wil6210_priv *wil)
774 wil_dbg_misc(wil, "NAPI enable\n"); 771 wil_dbg_misc(wil, "NAPI enable\n");
775 napi_enable(&wil->napi_rx); 772 napi_enable(&wil->napi_rx);
776 napi_enable(&wil->napi_tx); 773 napi_enable(&wil->napi_tx);
777 set_bit(wil_status_napi_en, &wil->status); 774 set_bit(wil_status_napi_en, wil->status);
778 775
779 if (wil->platform_ops.bus_request) 776 if (wil->platform_ops.bus_request)
780 wil->platform_ops.bus_request(wil->platform_handle, 777 wil->platform_ops.bus_request(wil->platform_handle,
@@ -807,7 +804,7 @@ int __wil_down(struct wil6210_priv *wil)
807 wil->platform_ops.bus_request(wil->platform_handle, 0); 804 wil->platform_ops.bus_request(wil->platform_handle, 0);
808 805
809 wil_disable_irq(wil); 806 wil_disable_irq(wil);
810 if (test_and_clear_bit(wil_status_napi_en, &wil->status)) { 807 if (test_and_clear_bit(wil_status_napi_en, wil->status)) {
811 napi_disable(&wil->napi_rx); 808 napi_disable(&wil->napi_rx);
812 napi_disable(&wil->napi_tx); 809 napi_disable(&wil->napi_tx);
813 wil_dbg_misc(wil, "NAPI disable\n"); 810 wil_dbg_misc(wil, "NAPI disable\n");
@@ -822,15 +819,15 @@ int __wil_down(struct wil6210_priv *wil)
822 wil->scan_request = NULL; 819 wil->scan_request = NULL;
823 } 820 }
824 821
825 if (test_bit(wil_status_fwconnected, &wil->status) || 822 if (test_bit(wil_status_fwconnected, wil->status) ||
826 test_bit(wil_status_fwconnecting, &wil->status)) 823 test_bit(wil_status_fwconnecting, wil->status))
827 wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0); 824 wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
828 825
829 /* make sure wil is idle (not connected) */ 826 /* make sure wil is idle (not connected) */
830 mutex_unlock(&wil->mutex); 827 mutex_unlock(&wil->mutex);
831 while (iter--) { 828 while (iter--) {
832 int idle = !test_bit(wil_status_fwconnected, &wil->status) && 829 int idle = !test_bit(wil_status_fwconnected, wil->status) &&
833 !test_bit(wil_status_fwconnecting, &wil->status); 830 !test_bit(wil_status_fwconnecting, wil->status);
834 if (idle) 831 if (idle)
835 break; 832 break;
836 msleep(WAIT_FOR_DISCONNECT_INTERVAL_MS); 833 msleep(WAIT_FOR_DISCONNECT_INTERVAL_MS);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index e81703ca7701..ace30c1b5c64 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. 2 * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -15,7 +15,6 @@
15 */ 15 */
16 16
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
18
19#include "wil6210.h" 18#include "wil6210.h"
20#include "txrx.h" 19#include "txrx.h"
21 20
@@ -122,6 +121,12 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
122 return min(tx_done, budget); 121 return min(tx_done, budget);
123} 122}
124 123
124static void wil_dev_setup(struct net_device *dev)
125{
126 ether_setup(dev);
127 dev->tx_queue_len = WIL_TX_Q_LEN_DEFAULT;
128}
129
125void *wil_if_alloc(struct device *dev, void __iomem *csr) 130void *wil_if_alloc(struct device *dev, void __iomem *csr)
126{ 131{
127 struct net_device *ndev; 132 struct net_device *ndev;
@@ -153,7 +158,7 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
153 ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels; 158 ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
154 cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT); 159 cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
155 160
156 ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup); 161 ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
157 if (!ndev) { 162 if (!ndev) {
158 dev_err(dev, "alloc_netdev_mqs failed\n"); 163 dev_err(dev, "alloc_netdev_mqs failed\n");
159 rc = -ENOMEM; 164 rc = -ENOMEM;
@@ -174,7 +179,7 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
174 netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx, 179 netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
175 WIL6210_NAPI_BUDGET); 180 WIL6210_NAPI_BUDGET);
176 181
177 wil_link_off(wil); 182 netif_tx_stop_all_queues(ndev);
178 183
179 return wil; 184 return wil;
180 185
@@ -217,8 +222,6 @@ int wil_if_add(struct wil6210_priv *wil)
217 return rc; 222 return rc;
218 } 223 }
219 224
220 wil_link_off(wil);
221
222 return 0; 225 return 0;
223} 226}
224 227
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 66626a8ee728..3dd26709ccb2 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -31,6 +31,46 @@ static bool debug_fw; /* = false; */
31module_param(debug_fw, bool, S_IRUGO); 31module_param(debug_fw, bool, S_IRUGO);
32MODULE_PARM_DESC(debug_fw, " load driver if FW not ready. For FW debug"); 32MODULE_PARM_DESC(debug_fw, " load driver if FW not ready. For FW debug");
33 33
34static
35void wil_set_capabilities(struct wil6210_priv *wil)
36{
37 u32 rev_id = ioread32(wil->csr + HOSTADDR(RGF_USER_JTAG_DEV_ID));
38
39 bitmap_zero(wil->hw_capabilities, hw_capability_last);
40
41 switch (rev_id) {
42 case JTAG_DEV_ID_MARLON_B0:
43 wil->hw_name = "Marlon B0";
44 wil->hw_version = HW_VER_MARLON_B0;
45 break;
46 case JTAG_DEV_ID_SPARROW_A0:
47 wil->hw_name = "Sparrow A0";
48 wil->hw_version = HW_VER_SPARROW_A0;
49 break;
50 case JTAG_DEV_ID_SPARROW_A1:
51 wil->hw_name = "Sparrow A1";
52 wil->hw_version = HW_VER_SPARROW_A1;
53 break;
54 case JTAG_DEV_ID_SPARROW_B0:
55 wil->hw_name = "Sparrow B0";
56 wil->hw_version = HW_VER_SPARROW_B0;
57 break;
58 default:
59 wil_err(wil, "Unknown board hardware 0x%08x\n", rev_id);
60 wil->hw_name = "Unknown";
61 wil->hw_version = HW_VER_UNKNOWN;
62 }
63
64 wil_info(wil, "Board hardware is %s\n", wil->hw_name);
65
66 if (wil->hw_version >= HW_VER_SPARROW_A0)
67 set_bit(hw_capability_reset_v2, wil->hw_capabilities);
68
69 if (wil->hw_version >= HW_VER_SPARROW_B0)
70 set_bit(hw_capability_advanced_itr_moderation,
71 wil->hw_capabilities);
72}
73
34void wil_disable_irq(struct wil6210_priv *wil) 74void wil_disable_irq(struct wil6210_priv *wil)
35{ 75{
36 int irq = wil->pdev->irq; 76 int irq = wil->pdev->irq;
@@ -149,12 +189,11 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
149 struct wil6210_priv *wil; 189 struct wil6210_priv *wil;
150 struct device *dev = &pdev->dev; 190 struct device *dev = &pdev->dev;
151 void __iomem *csr; 191 void __iomem *csr;
152 struct wil_board *board = (struct wil_board *)id->driver_data;
153 int rc; 192 int rc;
154 193
155 /* check HW */ 194 /* check HW */
156 dev_info(&pdev->dev, WIL_NAME 195 dev_info(&pdev->dev, WIL_NAME
157 " \"%s\" device found [%04x:%04x] (rev %x)\n", board->name, 196 " device found [%04x:%04x] (rev %x)\n",
158 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); 197 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
159 198
160 if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) { 199 if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
@@ -204,8 +243,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
204 243
205 pci_set_drvdata(pdev, wil); 244 pci_set_drvdata(pdev, wil);
206 wil->pdev = pdev; 245 wil->pdev = pdev;
207 wil->board = board; 246 wil_set_capabilities(wil);
208
209 wil6210_clear_irq(wil); 247 wil6210_clear_irq(wil);
210 248
211 wil->platform_handle = 249 wil->platform_handle =
@@ -266,23 +304,10 @@ static void wil_pcie_remove(struct pci_dev *pdev)
266 pci_disable_device(pdev); 304 pci_disable_device(pdev);
267} 305}
268 306
269static const struct wil_board wil_board_marlon = {
270 .board = WIL_BOARD_MARLON,
271 .name = "marlon",
272};
273
274static const struct wil_board wil_board_sparrow = {
275 .board = WIL_BOARD_SPARROW,
276 .name = "sparrow",
277};
278
279static const struct pci_device_id wil6210_pcie_ids[] = { 307static const struct pci_device_id wil6210_pcie_ids[] = {
280 { PCI_DEVICE(0x1ae9, 0x0301), 308 { PCI_DEVICE(0x1ae9, 0x0301) },
281 .driver_data = (kernel_ulong_t)&wil_board_marlon }, 309 { PCI_DEVICE(0x1ae9, 0x0310) },
282 { PCI_DEVICE(0x1ae9, 0x0310), 310 { PCI_DEVICE(0x1ae9, 0x0302) }, /* same as above, firmware broken */
283 .driver_data = (kernel_ulong_t)&wil_board_sparrow },
284 { PCI_DEVICE(0x1ae9, 0x0302), /* same as above, firmware broken */
285 .driver_data = (kernel_ulong_t)&wil_board_sparrow },
286 { /* end: all zeroes */ }, 311 { /* end: all zeroes */ },
287}; 312};
288MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids); 313MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 489cb73d139b..ca10dcf0986e 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014 Qualcomm Atheros, Inc. 2 * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -89,7 +89,9 @@ static void wil_reorder_release(struct wil6210_priv *wil,
89 } 89 }
90} 90}
91 91
92/* called in NAPI context */
92void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb) 93void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
94__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
93{ 95{
94 struct net_device *ndev = wil_to_ndev(wil); 96 struct net_device *ndev = wil_to_ndev(wil);
95 struct vring_rx_desc *d = wil_skb_rxdesc(skb); 97 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
@@ -97,22 +99,26 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
97 int cid = wil_rxdesc_cid(d); 99 int cid = wil_rxdesc_cid(d);
98 int mid = wil_rxdesc_mid(d); 100 int mid = wil_rxdesc_mid(d);
99 u16 seq = wil_rxdesc_seq(d); 101 u16 seq = wil_rxdesc_seq(d);
102 int mcast = wil_rxdesc_mcast(d);
100 struct wil_sta_info *sta = &wil->sta[cid]; 103 struct wil_sta_info *sta = &wil->sta[cid];
101 struct wil_tid_ampdu_rx *r; 104 struct wil_tid_ampdu_rx *r;
102 u16 hseq; 105 u16 hseq;
103 int index; 106 int index;
104 unsigned long flags;
105 107
106 wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x\n", 108 wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
107 mid, cid, tid, seq); 109 mid, cid, tid, seq, mcast);
108 110
109 spin_lock_irqsave(&sta->tid_rx_lock, flags); 111 if (unlikely(mcast)) {
112 wil_netif_rx_any(skb, ndev);
113 return;
114 }
115
116 spin_lock(&sta->tid_rx_lock);
110 117
111 r = sta->tid_rx[tid]; 118 r = sta->tid_rx[tid];
112 if (!r) { 119 if (!r) {
113 spin_unlock_irqrestore(&sta->tid_rx_lock, flags);
114 wil_netif_rx_any(skb, ndev); 120 wil_netif_rx_any(skb, ndev);
115 return; 121 goto out;
116 } 122 }
117 123
118 hseq = r->head_seq_num; 124 hseq = r->head_seq_num;
@@ -121,13 +127,24 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
121 * reported, and data Rx, few packets may be pass up before reorder 127 * reported, and data Rx, few packets may be pass up before reorder
122 * buffer get allocated. Catch up by pretending SSN is what we 128 * buffer get allocated. Catch up by pretending SSN is what we
123 * see in the 1-st Rx packet 129 * see in the 1-st Rx packet
130 *
131 * Another scenario, Rx get delayed and we got packet from before
132 * BACK. Pass it to the stack and wait.
124 */ 133 */
125 if (r->first_time) { 134 if (r->first_time) {
126 r->first_time = false; 135 r->first_time = false;
127 if (seq != r->head_seq_num) { 136 if (seq != r->head_seq_num) {
128 wil_err(wil, "Error: 1-st frame with wrong sequence" 137 if (seq_less(seq, r->head_seq_num)) {
129 " %d, should be %d. Fixing...\n", seq, 138 wil_err(wil,
130 r->head_seq_num); 139 "Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
140 seq, r->head_seq_num);
141 r->first_time = true;
142 wil_netif_rx_any(skb, ndev);
143 goto out;
144 }
145 wil_err(wil,
146 "Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
147 seq, r->head_seq_num);
131 r->head_seq_num = seq; 148 r->head_seq_num = seq;
132 r->ssn = seq; 149 r->ssn = seq;
133 } 150 }
@@ -179,7 +196,7 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
179 wil_reorder_release(wil, r); 196 wil_reorder_release(wil, r);
180 197
181out: 198out:
182 spin_unlock_irqrestore(&sta->tid_rx_lock, flags); 199 spin_unlock(&sta->tid_rx_lock);
183} 200}
184 201
185struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, 202struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
@@ -219,3 +236,241 @@ void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
219 kfree(r->reorder_time); 236 kfree(r->reorder_time);
220 kfree(r); 237 kfree(r);
221} 238}
239
240/* ADDBA processing */
241static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
242{
243 u16 max_agg_size = min_t(u16, WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE /
244 (mtu_max + WIL_MAX_MPDU_OVERHEAD));
245
246 if (!req_agg_wsize)
247 return max_agg_size;
248
249 return min(max_agg_size, req_agg_wsize);
250}
251
252/* Block Ack - Rx side (recipient */
253int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
254 u8 dialog_token, __le16 ba_param_set,
255 __le16 ba_timeout, __le16 ba_seq_ctrl)
256{
257 struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL);
258
259 if (!req)
260 return -ENOMEM;
261
262 req->cidxtid = cidxtid;
263 req->dialog_token = dialog_token;
264 req->ba_param_set = le16_to_cpu(ba_param_set);
265 req->ba_timeout = le16_to_cpu(ba_timeout);
266 req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl);
267
268 mutex_lock(&wil->back_rx_mutex);
269 list_add_tail(&req->list, &wil->back_rx_pending);
270 mutex_unlock(&wil->back_rx_mutex);
271
272 queue_work(wil->wq_service, &wil->back_rx_worker);
273
274 return 0;
275}
276
277static void wil_back_rx_handle(struct wil6210_priv *wil,
278 struct wil_back_rx *req)
279__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
280{
281 struct wil_sta_info *sta;
282 u8 cid, tid;
283 u16 agg_wsize = 0;
284 /* bit 0: A-MSDU supported
285 * bit 1: policy (should be 0 for us)
286 * bits 2..5: TID
287 * bits 6..15: buffer size
288 */
289 u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15);
290 bool agg_amsdu = !!(req->ba_param_set & BIT(0));
291 int ba_policy = req->ba_param_set & BIT(1);
292 u16 agg_timeout = req->ba_timeout;
293 u16 status = WLAN_STATUS_SUCCESS;
294 u16 ssn = req->ba_seq_ctrl >> 4;
295 struct wil_tid_ampdu_rx *r;
296 int rc;
297
298 might_sleep();
299 parse_cidxtid(req->cidxtid, &cid, &tid);
300
301 /* sanity checks */
302 if (cid >= WIL6210_MAX_CID) {
303 wil_err(wil, "BACK: invalid CID %d\n", cid);
304 return;
305 }
306
307 sta = &wil->sta[cid];
308 if (sta->status != wil_sta_connected) {
309 wil_err(wil, "BACK: CID %d not connected\n", cid);
310 return;
311 }
312
313 wil_dbg_wmi(wil,
314 "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
315 cid, sta->addr, tid, req_agg_wsize, req->ba_timeout,
316 agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn);
317
318 /* apply policies */
319 if (ba_policy) {
320 wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
321 status = WLAN_STATUS_INVALID_QOS_PARAM;
322 }
323 if (status == WLAN_STATUS_SUCCESS)
324 agg_wsize = wil_agg_size(wil, req_agg_wsize);
325
326 rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status,
327 agg_amsdu, agg_wsize, agg_timeout);
328 if (rc || (status != WLAN_STATUS_SUCCESS))
329 return;
330
331 /* apply */
332 r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
333 spin_lock_bh(&sta->tid_rx_lock);
334 wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
335 sta->tid_rx[tid] = r;
336 spin_unlock_bh(&sta->tid_rx_lock);
337}
338
339void wil_back_rx_flush(struct wil6210_priv *wil)
340{
341 struct wil_back_rx *evt, *t;
342
343 wil_dbg_misc(wil, "%s()\n", __func__);
344
345 mutex_lock(&wil->back_rx_mutex);
346
347 list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) {
348 list_del(&evt->list);
349 kfree(evt);
350 }
351
352 mutex_unlock(&wil->back_rx_mutex);
353}
354
355/* Retrieve next ADDBA request from the pending list */
356static struct list_head *next_back_rx(struct wil6210_priv *wil)
357{
358 struct list_head *ret = NULL;
359
360 mutex_lock(&wil->back_rx_mutex);
361
362 if (!list_empty(&wil->back_rx_pending)) {
363 ret = wil->back_rx_pending.next;
364 list_del(ret);
365 }
366
367 mutex_unlock(&wil->back_rx_mutex);
368
369 return ret;
370}
371
372void wil_back_rx_worker(struct work_struct *work)
373{
374 struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
375 back_rx_worker);
376 struct wil_back_rx *evt;
377 struct list_head *lh;
378
379 while ((lh = next_back_rx(wil)) != NULL) {
380 evt = list_entry(lh, struct wil_back_rx, list);
381
382 wil_back_rx_handle(wil, evt);
383 kfree(evt);
384 }
385}
386
387/* BACK - Tx (originator) side */
388static void wil_back_tx_handle(struct wil6210_priv *wil,
389 struct wil_back_tx *req)
390{
391 struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid];
392 int rc;
393
394 if (txdata->addba_in_progress) {
395 wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
396 req->ringid);
397 return;
398 }
399 if (txdata->agg_wsize) {
400 wil_dbg_misc(wil,
401 "ADDBA for vring[%d] already established wsize %d\n",
402 req->ringid, txdata->agg_wsize);
403 return;
404 }
405 txdata->addba_in_progress = true;
406 rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout);
407 if (rc)
408 txdata->addba_in_progress = false;
409}
410
411static struct list_head *next_back_tx(struct wil6210_priv *wil)
412{
413 struct list_head *ret = NULL;
414
415 mutex_lock(&wil->back_tx_mutex);
416
417 if (!list_empty(&wil->back_tx_pending)) {
418 ret = wil->back_tx_pending.next;
419 list_del(ret);
420 }
421
422 mutex_unlock(&wil->back_tx_mutex);
423
424 return ret;
425}
426
427void wil_back_tx_worker(struct work_struct *work)
428{
429 struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
430 back_tx_worker);
431 struct wil_back_tx *evt;
432 struct list_head *lh;
433
434 while ((lh = next_back_tx(wil)) != NULL) {
435 evt = list_entry(lh, struct wil_back_tx, list);
436
437 wil_back_tx_handle(wil, evt);
438 kfree(evt);
439 }
440}
441
442void wil_back_tx_flush(struct wil6210_priv *wil)
443{
444 struct wil_back_tx *evt, *t;
445
446 wil_dbg_misc(wil, "%s()\n", __func__);
447
448 mutex_lock(&wil->back_tx_mutex);
449
450 list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) {
451 list_del(&evt->list);
452 kfree(evt);
453 }
454
455 mutex_unlock(&wil->back_tx_mutex);
456}
457
458int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
459{
460 struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL);
461
462 if (!req)
463 return -ENOMEM;
464
465 req->ringid = ringid;
466 req->agg_wsize = wil_agg_size(wil, wsize);
467 req->agg_timeout = 0;
468
469 mutex_lock(&wil->back_tx_mutex);
470 list_add_tail(&req->list, &wil->back_tx_pending);
471 mutex_unlock(&wil->back_tx_mutex);
472
473 queue_work(wil->wq_service, &wil->back_tx_worker);
474
475 return 0;
476}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index e3f8bdce5abc..8439f65db259 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -463,7 +463,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
463 * and in case of error drop the packet 463 * and in case of error drop the packet
464 * higher stack layers will handle retransmission (if required) 464 * higher stack layers will handle retransmission (if required)
465 */ 465 */
466 if (d->dma.status & RX_DMA_STATUS_L4_IDENT) { 466 if (d->dma.status & RX_DMA_STATUS_L4I) {
467 /* L4 protocol identified, csum calculated */ 467 /* L4 protocol identified, csum calculated */
468 if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0) 468 if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)
469 skb->ip_summed = CHECKSUM_UNNECESSARY; 469 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -581,14 +581,8 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
581 skb->protocol = htons(ETH_P_802_2); 581 skb->protocol = htons(ETH_P_802_2);
582 wil_netif_rx_any(skb, ndev); 582 wil_netif_rx_any(skb, ndev);
583 } else { 583 } else {
584 struct ethhdr *eth = (void *)skb->data;
585
586 skb->protocol = eth_type_trans(skb, ndev); 584 skb->protocol = eth_type_trans(skb, ndev);
587 585 wil_rx_reorder(wil, skb);
588 if (is_unicast_ether_addr(eth->h_dest))
589 wil_rx_reorder(wil, skb);
590 else
591 wil_netif_rx_any(skb, ndev);
592 } 586 }
593 } 587 }
594 wil_rx_refill(wil, v->size); 588 wil_rx_refill(wil, v->size);
@@ -645,7 +639,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
645 .vring_cfg = { 639 .vring_cfg = {
646 .tx_sw_ring = { 640 .tx_sw_ring = {
647 .max_mpdu_size = 641 .max_mpdu_size =
648 cpu_to_le16(mtu_max + ETH_HLEN), 642 cpu_to_le16(wil_mtu2macbuf(mtu_max)),
649 .ring_size = cpu_to_le16(size), 643 .ring_size = cpu_to_le16(size),
650 }, 644 },
651 .ringid = id, 645 .ringid = id,
@@ -653,7 +647,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
653 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3, 647 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
654 .mac_ctrl = 0, 648 .mac_ctrl = 0,
655 .to_resolution = 0, 649 .to_resolution = 0,
656 .agg_max_wsize = 16, 650 .agg_max_wsize = 0,
657 .schd_params = { 651 .schd_params = {
658 .priority = cpu_to_le16(0), 652 .priority = cpu_to_le16(0),
659 .timeslot_us = cpu_to_le16(0xfff), 653 .timeslot_us = cpu_to_le16(0xfff),
@@ -677,6 +671,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
677 } 671 }
678 672
679 memset(txdata, 0, sizeof(*txdata)); 673 memset(txdata, 0, sizeof(*txdata));
674 spin_lock_init(&txdata->lock);
680 vring->size = size; 675 vring->size = size;
681 rc = wil_vring_alloc(wil, vring); 676 rc = wil_vring_alloc(wil, vring);
682 if (rc) 677 if (rc)
@@ -701,6 +696,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
701 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); 696 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
702 697
703 txdata->enabled = 1; 698 txdata->enabled = 1;
699 if (wil->sta[cid].data_port_open && (agg_wsize >= 0))
700 wil_addba_tx_request(wil, id, agg_wsize);
704 701
705 return 0; 702 return 0;
706 out_free: 703 out_free:
@@ -713,6 +710,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
713void wil_vring_fini_tx(struct wil6210_priv *wil, int id) 710void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
714{ 711{
715 struct vring *vring = &wil->vring_tx[id]; 712 struct vring *vring = &wil->vring_tx[id];
713 struct vring_tx_data *txdata = &wil->vring_tx_data[id];
716 714
717 WARN_ON(!mutex_is_locked(&wil->mutex)); 715 WARN_ON(!mutex_is_locked(&wil->mutex));
718 716
@@ -721,12 +719,15 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
721 719
722 wil_dbg_misc(wil, "%s() id=%d\n", __func__, id); 720 wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
723 721
722 spin_lock_bh(&txdata->lock);
723 txdata->enabled = 0; /* no Tx can be in progress or start anew */
724 spin_unlock_bh(&txdata->lock);
724 /* make sure NAPI won't touch this vring */ 725 /* make sure NAPI won't touch this vring */
725 wil->vring_tx_data[id].enabled = 0; 726 if (test_bit(wil_status_napi_en, wil->status))
726 if (test_bit(wil_status_napi_en, &wil->status))
727 napi_synchronize(&wil->napi_tx); 727 napi_synchronize(&wil->napi_tx);
728 728
729 wil_vring_free(wil, vring, 1); 729 wil_vring_free(wil, vring, 1);
730 memset(txdata, 0, sizeof(*txdata));
730} 731}
731 732
732static struct vring *wil_find_tx_vring(struct wil6210_priv *wil, 733static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
@@ -773,6 +774,38 @@ static void wil_set_da_for_vring(struct wil6210_priv *wil,
773 774
774static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, 775static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
775 struct sk_buff *skb); 776 struct sk_buff *skb);
777
778static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
779 struct sk_buff *skb)
780{
781 struct vring *v;
782 int i;
783 u8 cid;
784
785 /* In the STA mode, it is expected to have only 1 VRING
786 * for the AP we connected to.
787 * find 1-st vring and see whether it is eligible for data
788 */
789 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
790 v = &wil->vring_tx[i];
791 if (!v->va)
792 continue;
793
794 cid = wil->vring2cid_tid[i][0];
795 if (!wil->sta[cid].data_port_open &&
796 (skb->protocol != cpu_to_be16(ETH_P_PAE)))
797 break;
798
799 wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
800
801 return v;
802 }
803
804 wil_dbg_txrx(wil, "Tx while no vrings active?\n");
805
806 return NULL;
807}
808
776/* 809/*
777 * Find 1-st vring and return it; set dest address for this vring in skb 810 * Find 1-st vring and return it; set dest address for this vring in skb
778 * duplicate skb and send it to other active vrings 811 * duplicate skb and send it to other active vrings
@@ -843,9 +876,6 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
843 d->mac.d[1] = 0; 876 d->mac.d[1] = 0;
844 d->mac.d[2] = 0; 877 d->mac.d[2] = 0;
845 d->mac.ucode_cmd = 0; 878 d->mac.ucode_cmd = 0;
846 /* use dst index 0 */
847 d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS) |
848 (0 << MAC_CFG_DESC_TX_1_DST_INDEX_POS);
849 /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */ 879 /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
850 d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) | 880 d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
851 (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS); 881 (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
@@ -908,8 +938,8 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
908 return 0; 938 return 0;
909} 939}
910 940
911static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, 941static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
912 struct sk_buff *skb) 942 struct sk_buff *skb)
913{ 943{
914 struct device *dev = wil_to_dev(wil); 944 struct device *dev = wil_to_dev(wil);
915 struct vring_tx_desc dd, *d = &dd; 945 struct vring_tx_desc dd, *d = &dd;
@@ -925,18 +955,21 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
925 955
926 wil_dbg_txrx(wil, "%s()\n", __func__); 956 wil_dbg_txrx(wil, "%s()\n", __func__);
927 957
958 if (unlikely(!txdata->enabled))
959 return -EINVAL;
960
928 if (avail < 1 + nr_frags) { 961 if (avail < 1 + nr_frags) {
929 wil_err_ratelimited(wil, 962 wil_err_ratelimited(wil,
930 "Tx ring full. No space for %d fragments\n", 963 "Tx ring[%2d] full. No space for %d fragments\n",
931 1 + nr_frags); 964 vring_index, 1 + nr_frags);
932 return -ENOMEM; 965 return -ENOMEM;
933 } 966 }
934 _d = &vring->va[i].tx; 967 _d = &vring->va[i].tx;
935 968
936 pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); 969 pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
937 970
938 wil_dbg_txrx(wil, "Tx skb %d bytes 0x%p -> %pad\n", skb_headlen(skb), 971 wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
939 skb->data, &pa); 972 skb_headlen(skb), skb->data, &pa);
940 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1, 973 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
941 skb->data, skb_headlen(skb), false); 974 skb->data, skb_headlen(skb), false);
942 975
@@ -947,15 +980,13 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
947 wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index); 980 wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
948 /* Process TCP/UDP checksum offloading */ 981 /* Process TCP/UDP checksum offloading */
949 if (wil_tx_desc_offload_cksum_set(wil, d, skb)) { 982 if (wil_tx_desc_offload_cksum_set(wil, d, skb)) {
950 wil_err(wil, "VRING #%d Failed to set cksum, drop packet\n", 983 wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
951 vring_index); 984 vring_index);
952 goto dma_error; 985 goto dma_error;
953 } 986 }
954 987
955 vring->ctx[i].nr_frags = nr_frags; 988 vring->ctx[i].nr_frags = nr_frags;
956 wil_tx_desc_set_nr_frags(d, nr_frags); 989 wil_tx_desc_set_nr_frags(d, nr_frags);
957 if (nr_frags)
958 *_d = *d;
959 990
960 /* middle segments */ 991 /* middle segments */
961 for (; f < nr_frags; f++) { 992 for (; f < nr_frags; f++) {
@@ -963,6 +994,10 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
963 &skb_shinfo(skb)->frags[f]; 994 &skb_shinfo(skb)->frags[f];
964 int len = skb_frag_size(frag); 995 int len = skb_frag_size(frag);
965 996
997 *_d = *d;
998 wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
999 wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
1000 (const void *)d, sizeof(*d), false);
966 i = (swhead + f + 1) % vring->size; 1001 i = (swhead + f + 1) % vring->size;
967 _d = &vring->va[i].tx; 1002 _d = &vring->va[i].tx;
968 pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), 1003 pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
@@ -976,13 +1011,15 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
976 * it will succeed here too 1011 * it will succeed here too
977 */ 1012 */
978 wil_tx_desc_offload_cksum_set(wil, d, skb); 1013 wil_tx_desc_offload_cksum_set(wil, d, skb);
979 *_d = *d;
980 } 1014 }
981 /* for the last seg only */ 1015 /* for the last seg only */
982 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS); 1016 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
983 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS); 1017 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
984 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); 1018 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
985 *_d = *d; 1019 *_d = *d;
1020 wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
1021 wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
1022 (const void *)d, sizeof(*d), false);
986 1023
987 /* hold reference to skb 1024 /* hold reference to skb
988 * to prevent skb release before accounting 1025 * to prevent skb release before accounting
@@ -990,15 +1027,13 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
990 */ 1027 */
991 vring->ctx[i].skb = skb_get(skb); 1028 vring->ctx[i].skb = skb_get(skb);
992 1029
993 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
994 (const void *)d, sizeof(*d), false);
995
996 if (wil_vring_is_empty(vring)) /* performance monitoring */ 1030 if (wil_vring_is_empty(vring)) /* performance monitoring */
997 txdata->idle += get_cycles() - txdata->last_idle; 1031 txdata->idle += get_cycles() - txdata->last_idle;
998 1032
999 /* advance swhead */ 1033 /* advance swhead */
1000 wil_vring_advance_head(vring, nr_frags + 1); 1034 wil_vring_advance_head(vring, nr_frags + 1);
1001 wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead); 1035 wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
1036 vring->swhead);
1002 trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags); 1037 trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
1003 iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail)); 1038 iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
1004 1039
@@ -1025,6 +1060,19 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
1025 return -EINVAL; 1060 return -EINVAL;
1026} 1061}
1027 1062
1063static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
1064 struct sk_buff *skb)
1065{
1066 int vring_index = vring - wil->vring_tx;
1067 struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
1068 int rc;
1069
1070 spin_lock(&txdata->lock);
1071 rc = __wil_tx_vring(wil, vring, skb);
1072 spin_unlock(&txdata->lock);
1073 return rc;
1074}
1075
1028netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) 1076netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1029{ 1077{
1030 struct wil6210_priv *wil = ndev_to_wil(ndev); 1078 struct wil6210_priv *wil = ndev_to_wil(ndev);
@@ -1034,14 +1082,14 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1034 int rc; 1082 int rc;
1035 1083
1036 wil_dbg_txrx(wil, "%s()\n", __func__); 1084 wil_dbg_txrx(wil, "%s()\n", __func__);
1037 if (!test_bit(wil_status_fwready, &wil->status)) { 1085 if (!test_bit(wil_status_fwready, wil->status)) {
1038 if (!pr_once_fw) { 1086 if (!pr_once_fw) {
1039 wil_err(wil, "FW not ready\n"); 1087 wil_err(wil, "FW not ready\n");
1040 pr_once_fw = true; 1088 pr_once_fw = true;
1041 } 1089 }
1042 goto drop; 1090 goto drop;
1043 } 1091 }
1044 if (!test_bit(wil_status_fwconnected, &wil->status)) { 1092 if (!test_bit(wil_status_fwconnected, wil->status)) {
1045 wil_err(wil, "FW not connected\n"); 1093 wil_err(wil, "FW not connected\n");
1046 goto drop; 1094 goto drop;
1047 } 1095 }
@@ -1052,15 +1100,19 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1052 pr_once_fw = false; 1100 pr_once_fw = false;
1053 1101
1054 /* find vring */ 1102 /* find vring */
1055 if (is_unicast_ether_addr(eth->h_dest)) 1103 if (wil->wdev->iftype == NL80211_IFTYPE_STATION) {
1056 vring = wil_find_tx_vring(wil, skb); 1104 /* in STA mode (ESS), all to same VRING */
1057 else 1105 vring = wil_find_tx_vring_sta(wil, skb);
1058 vring = wil_tx_bcast(wil, skb); 1106 } else { /* direct communication, find matching VRING */
1107 if (is_unicast_ether_addr(eth->h_dest))
1108 vring = wil_find_tx_vring(wil, skb);
1109 else
1110 vring = wil_tx_bcast(wil, skb);
1111 }
1059 if (!vring) { 1112 if (!vring) {
1060 wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); 1113 wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
1061 goto drop; 1114 goto drop;
1062 } 1115 }
1063
1064 /* set up vring entry */ 1116 /* set up vring entry */
1065 rc = wil_tx_vring(wil, vring, skb); 1117 rc = wil_tx_vring(wil, vring, skb);
1066 1118
@@ -1087,6 +1139,22 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1087 return NET_XMIT_DROP; 1139 return NET_XMIT_DROP;
1088} 1140}
1089 1141
1142static inline bool wil_need_txstat(struct sk_buff *skb)
1143{
1144 struct ethhdr *eth = (void *)skb->data;
1145
1146 return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
1147 (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
1148}
1149
1150static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
1151{
1152 if (unlikely(wil_need_txstat(skb)))
1153 skb_complete_wifi_ack(skb, acked);
1154 else
1155 acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
1156}
1157
1090/** 1158/**
1091 * Clean up transmitted skb's from the Tx VRING 1159 * Clean up transmitted skb's from the Tx VRING
1092 * 1160 *
@@ -1147,10 +1215,10 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
1147 trace_wil6210_tx_done(ringid, vring->swtail, dmalen, 1215 trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
1148 d->dma.error); 1216 d->dma.error);
1149 wil_dbg_txrx(wil, 1217 wil_dbg_txrx(wil,
1150 "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n", 1218 "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
1151 vring->swtail, dmalen, d->dma.status, 1219 ringid, vring->swtail, dmalen,
1152 d->dma.error); 1220 d->dma.status, d->dma.error);
1153 wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4, 1221 wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
1154 (const void *)d, sizeof(*d), false); 1222 (const void *)d, sizeof(*d), false);
1155 1223
1156 wil_txdesc_unmap(dev, d, ctx); 1224 wil_txdesc_unmap(dev, d, ctx);
@@ -1165,8 +1233,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
1165 ndev->stats.tx_errors++; 1233 ndev->stats.tx_errors++;
1166 stats->tx_errors++; 1234 stats->tx_errors++;
1167 } 1235 }
1168 1236 wil_consume_skb(skb, d->dma.error == 0);
1169 dev_kfree_skb_any(skb);
1170 } 1237 }
1171 memset(ctx, 0, sizeof(*ctx)); 1238 memset(ctx, 0, sizeof(*ctx));
1172 /* There is no need to touch HW descriptor: 1239 /* There is no need to touch HW descriptor:
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 630aeb5fa7f4..d90c8aa20c15 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -20,17 +20,15 @@
20#define BUF_SW_OWNED (1) 20#define BUF_SW_OWNED (1)
21#define BUF_HW_OWNED (0) 21#define BUF_HW_OWNED (0)
22 22
23/* size of max. Tx/Rx buffers, as supported by FW */ 23/* default size of MAC Tx/Rx buffers */
24#define TXRX_BUF_LEN_DEFAULT (2242) 24#define TXRX_BUF_LEN_DEFAULT (2048)
25 25
26/* how many bytes to reserve for rtap header? */ 26/* how many bytes to reserve for rtap header? */
27#define WIL6210_RTAP_SIZE (128) 27#define WIL6210_RTAP_SIZE (128)
28 28
29/* Tx/Rx path */ 29/* Tx/Rx path */
30 30
31/* 31/* Common representation of physical address in Vring */
32 * Common representation of physical address in Vring
33 */
34struct vring_dma_addr { 32struct vring_dma_addr {
35 __le32 addr_low; 33 __le32 addr_low;
36 __le16 addr_high; 34 __le16 addr_high;
@@ -49,11 +47,10 @@ static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
49 addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa)); 47 addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
50} 48}
51 49
52/* 50/* Tx descriptor - MAC part
53 * Tx descriptor - MAC part
54 * [dword 0] 51 * [dword 0]
55 * bit 0.. 9 : lifetime_expiry_value:10 52 * bit 0.. 9 : lifetime_expiry_value:10
56 * bit 10 : interrup_en:1 53 * bit 10 : interrupt_en:1
57 * bit 11 : status_en:1 54 * bit 11 : status_en:1
58 * bit 12..13 : txss_override:2 55 * bit 12..13 : txss_override:2
59 * bit 14 : timestamp_insertion:1 56 * bit 14 : timestamp_insertion:1
@@ -61,15 +58,12 @@ static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
61 * bit 16..21 : reserved0:6 58 * bit 16..21 : reserved0:6
62 * bit 22..26 : mcs_index:5 59 * bit 22..26 : mcs_index:5
63 * bit 27 : mcs_en:1 60 * bit 27 : mcs_en:1
64 * bit 28..29 : reserved1:2 61 * bit 28..30 : reserved1:3
65 * bit 30 : reserved2:1
66 * bit 31 : sn_preserved:1 62 * bit 31 : sn_preserved:1
67 * [dword 1] 63 * [dword 1]
68 * bit 0.. 3 : pkt_mode:4 64 * bit 0.. 3 : pkt_mode:4
69 * bit 4 : pkt_mode_en:1 65 * bit 4 : pkt_mode_en:1
70 * bit 5.. 7 : reserved0:3 66 * bit 5..14 : reserved0:10
71 * bit 8..13 : reserved1:6
72 * bit 14 : reserved2:1
73 * bit 15 : ack_policy_en:1 67 * bit 15 : ack_policy_en:1
74 * bit 16..19 : dst_index:4 68 * bit 16..19 : dst_index:4
75 * bit 20 : dst_index_en:1 69 * bit 20 : dst_index_en:1
@@ -80,7 +74,7 @@ static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
80 * [dword 2] 74 * [dword 2]
81 * bit 0.. 7 : num_of_descriptors:8 75 * bit 0.. 7 : num_of_descriptors:8
82 * bit 8..17 : reserved:10 76 * bit 8..17 : reserved:10
83 * bit 18..19 : l2_translation_type:2 77 * bit 18..19 : l2_translation_type:2 00 - bypass, 01 - 802.3, 10 - 802.11
84 * bit 20 : snap_hdr_insertion_en:1 78 * bit 20 : snap_hdr_insertion_en:1
85 * bit 21 : vlan_removal_en:1 79 * bit 21 : vlan_removal_en:1
86 * bit 22..31 : reserved0:10 80 * bit 22..31 : reserved0:10
@@ -247,6 +241,46 @@ struct vring_tx_mac {
247 241
248#define TX_DMA_STATUS_DU BIT(0) 242#define TX_DMA_STATUS_DU BIT(0)
249 243
244/* Tx descriptor - DMA part
245 * [dword 0]
246 * bit 0.. 7 : l4_length:8 layer 4 length
247 * bit 8 : cmd_eop:1 This descriptor is the last one in the packet
248 * bit 9 : reserved
249 * bit 10 : cmd_dma_it:1 immediate interrupt
250 * bit 11..12 : SBD - Segment Buffer Details
251 * 00 - Header Segment
252 * 01 - First Data Segment
253 * 10 - Medium Data Segment
254 * 11 - Last Data Segment
255 * bit 13 : TSE - TCP Segmentation Enable
256 * bit 14 : IIC - Directs the HW to Insert IPv4 Checksum
257 * bit 15 : ITC - Directs the HW to Insert TCP/UDP Checksum
258 * bit 16..20 : QID - The target QID that the packet should be stored
259 * in the MAC.
260 * bit 21 : PO - Pseudo header Offload:
261 * 0 - Use the pseudo header value from the TCP checksum field
262 * 1- Calculate Pseudo header Checksum
263 * bit 22 : NC - No UDP Checksum
264 * bit 23..29 : reserved
265 * bit 30..31 : L4T - Layer 4 Type: 00 - UDP , 10 - TCP , 10, 11 - Reserved
266 * If L4Len equal 0, no L4 at all
267 * [dword 1]
268 * bit 0..31 : addr_low:32 The payload buffer low address
269 * [dword 2]
270 * bit 0..15 : addr_high:16 The payload buffer high address
271 * bit 16..23 : ip_length:8 The IP header length for the TX IP checksum
272 * offload feature
273 * bit 24..30 : mac_length:7
274 * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6
275 * [dword 3]
276 * [byte 12] error
277 * bit 0 2 : mac_status:3
278 * bit 3 7 : reserved:5
279 * [byte 13] status
280 * bit 0 : DU:1 Descriptor Used
281 * bit 1 7 : reserved:7
282 * [word 7] length
283 */
250struct vring_tx_dma { 284struct vring_tx_dma {
251 u32 d0; 285 u32 d0;
252 struct vring_dma_addr addr; 286 struct vring_dma_addr addr;
@@ -257,45 +291,45 @@ struct vring_tx_dma {
257 __le16 length; 291 __le16 length;
258} __packed; 292} __packed;
259 293
260/* 294/* Rx descriptor - MAC part
261 * Rx descriptor - MAC part
262 * [dword 0] 295 * [dword 0]
263 * bit 0.. 3 : tid:4 The QoS (b3-0) TID Field 296 * bit 0.. 3 : tid:4 The QoS (b3-0) TID Field
264 * bit 4.. 6 : connection_id:3 :The Source index that was found during 297 * bit 4.. 6 : cid:3 The Source index that was found during parsing the TA.
265 * Parsing the TA. This field is used to define the source of the packet 298 * This field is used to define the source of the packet
266 * bit 7 : reserved:1 299 * bit 7 : reserved:1
267 * bit 8.. 9 : mac_id:2 : The MAC virtual Ring number (always zero) 300 * bit 8.. 9 : mid:2 The MAC virtual number
268 * bit 10..11 : frame_type:2 : The FC Control (b3-2) - MPDU Type 301 * bit 10..11 : frame_type:2 : The FC (b3-2) - MPDU Type
269 * (management, data, control and extension) 302 * (management, data, control and extension)
270 * bit 12..15 : frame_subtype:4 : The FC Control (b7-4) - Frame Subtype 303 * bit 12..15 : frame_subtype:4 : The FC (b7-4) - Frame Subtype
271 * bit 16..27 : seq_number:12 The received Sequence number field 304 * bit 16..27 : seq_number:12 The received Sequence number field
272 * bit 28..31 : extended:4 extended subtype 305 * bit 28..31 : extended:4 extended subtype
273 * [dword 1] 306 * [dword 1]
274 * bit 0.. 3 : reserved 307 * bit 0.. 3 : reserved
275 * bit 4.. 5 : key_id:2 308 * bit 4.. 5 : key_id:2
276 * bit 6 : decrypt_bypass:1 309 * bit 6 : decrypt_bypass:1
277 * bit 7 : security:1 310 * bit 7 : security:1 FC (b14)
278 * bit 8.. 9 : ds_bits:2 311 * bit 8.. 9 : ds_bits:2 FC (b9-8)
279 * bit 10 : a_msdu_present:1 from qos header 312 * bit 10 : a_msdu_present:1 QoS (b7)
280 * bit 11 : a_msdu_type:1 from qos header 313 * bit 11 : a_msdu_type:1 QoS (b8)
281 * bit 12 : a_mpdu:1 part of AMPDU aggregation 314 * bit 12 : a_mpdu:1 part of AMPDU aggregation
282 * bit 13 : broadcast:1 315 * bit 13 : broadcast:1
283 * bit 14 : mutlicast:1 316 * bit 14 : mutlicast:1
284 * bit 15 : reserved:1 317 * bit 15 : reserved:1
285 * bit 16..20 : rx_mac_qid:5 The Queue Identifier that the packet 318 * bit 16..20 : rx_mac_qid:5 The Queue Identifier that the packet
286 * is received from 319 * is received from
287 * bit 21..24 : mcs:4 320 * bit 21..24 : mcs:4
288 * bit 25..28 : mic_icr:4 321 * bit 25..28 : mic_icr:4 this signal tells the DMA to assert an interrupt
322 * after it writes the packet
289 * bit 29..31 : reserved:3 323 * bit 29..31 : reserved:3
290 * [dword 2] 324 * [dword 2]
291 * bit 0.. 2 : time_slot:3 The timeslot that the MPDU is received 325 * bit 0.. 2 : time_slot:3 The timeslot that the MPDU is received
292 * bit 3 : fc_protocol_ver:1 The FC Control (b0) - Protocol Version 326 * bit 3.. 4 : fc_protocol_ver:1 The FC (b1-0) - Protocol Version
293 * bit 4 : fc_order:1 The FC Control (b15) -Order 327 * bit 5 : fc_order:1 The FC Control (b15) -Order
294 * bit 5.. 7 : qos_ack_policy:3 The QoS (b6-5) ack policy Field 328 * bit 6.. 7 : qos_ack_policy:2 The QoS (b6-5) ack policy Field
295 * bit 8 : esop:1 The QoS (b4) ESOP field 329 * bit 8 : esop:1 The QoS (b4) ESOP field
296 * bit 9 : qos_rdg_more_ppdu:1 The QoS (b9) RDG field 330 * bit 9 : qos_rdg_more_ppdu:1 The QoS (b9) RDG field
297 * bit 10..14 : qos_reserved:5 The QoS (b14-10) Reserved field 331 * bit 10..14 : qos_reserved:5 The QoS (b14-10) Reserved field
298 * bit 15 : qos_ac_constraint:1 332 * bit 15 : qos_ac_constraint:1 QoS (b15)
299 * bit 16..31 : pn_15_0:16 low 2 bytes of PN 333 * bit 16..31 : pn_15_0:16 low 2 bytes of PN
300 * [dword 3] 334 * [dword 3]
301 * bit 0..31 : pn_47_16:32 high 4 bytes of PN 335 * bit 0..31 : pn_47_16:32 high 4 bytes of PN
@@ -308,35 +342,46 @@ struct vring_rx_mac {
308 u32 pn_47_16; 342 u32 pn_47_16;
309} __packed; 343} __packed;
310 344
311/* 345/* Rx descriptor - DMA part
312 * Rx descriptor - DMA part
313 * [dword 0] 346 * [dword 0]
314 * bit 0.. 7 : l4_length:8 layer 4 length 347 * bit 0.. 7 : l4_length:8 layer 4 length. The field is only valid if
315 * bit 8.. 9 : reserved:2 348 * L4I bit is set
316 * bit 10 : cmd_dma_it:1 349 * bit 8 : cmd_eop:1 set to 1
350 * bit 9 : cmd_rt:1 set to 1
351 * bit 10 : cmd_dma_it:1 immediate interrupt
317 * bit 11..15 : reserved:5 352 * bit 11..15 : reserved:5
318 * bit 16..29 : phy_info_length:14 353 * bit 16..29 : phy_info_length:14 It is valid when the PII is set.
354 * When the FFM bit is set bits 29-27 are used for for
355 * Flex Filter Match. Matching Index to one of the L2
356 * EtherType Flex Filter
319 * bit 30..31 : l4_type:2 valid if the L4I bit is set in the status field 357 * bit 30..31 : l4_type:2 valid if the L4I bit is set in the status field
358 * 00 - UDP, 01 - TCP, 10, 11 - reserved
320 * [dword 1] 359 * [dword 1]
321 * bit 0..31 : addr_low:32 The payload buffer low address 360 * bit 0..31 : addr_low:32 The payload buffer low address
322 * [dword 2] 361 * [dword 2]
323 * bit 0..15 : addr_high:16 The payload buffer high address 362 * bit 0..15 : addr_high:16 The payload buffer high address
324 * bit 16..23 : ip_length:8 363 * bit 16..23 : ip_length:8 The filed is valid only if the L3I bit is set
325 * bit 24..30 : mac_length:7 364 * bit 24..30 : mac_length:7
326 * bit 31 : ip_version:1 365 * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6
327 * [dword 3] 366 * [dword 3]
328 * [byte 12] error 367 * [byte 12] error
368 * bit 0 : FCS:1
369 * bit 1 : MIC:1
370 * bit 2 : Key miss:1
371 * bit 3 : Replay:1
372 * bit 4 : L3:1 IPv4 checksum
373 * bit 5 : L4:1 TCP/UDP checksum
374 * bit 6 7 : reserved:2
329 * [byte 13] status 375 * [byte 13] status
330 * bit 0 : du:1 376 * bit 0 : DU:1 Descriptor Used
331 * bit 1 : eop:1 377 * bit 1 : EOP:1 The descriptor indicates the End of Packet
332 * bit 2 : error:1 378 * bit 2 : error:1
333 * bit 3 : mi:1 379 * bit 3 : MI:1 MAC Interrupt is asserted (according to parser decision)
334 * bit 4 : l3_identified:1 380 * bit 4 : L3I:1 L3 identified and checksum calculated
335 * bit 5 : l4_identified:1 381 * bit 5 : L4I:1 L4 identified and checksum calculated
336 * bit 6 : phy_info_included:1 382 * bit 6 : PII:1 PHY Info Included in the packet
337 * bit 7 : reserved:1 383 * bit 7 : FFM:1 EtherType Flex Filter Match
338 * [word 7] length 384 * [word 7] length
339 *
340 */ 385 */
341 386
342#define RX_DMA_D0_CMD_DMA_IT BIT(10) 387#define RX_DMA_D0_CMD_DMA_IT BIT(10)
@@ -349,9 +394,9 @@ struct vring_rx_mac {
349#define RX_DMA_STATUS_DU BIT(0) 394#define RX_DMA_STATUS_DU BIT(0)
350#define RX_DMA_STATUS_ERROR BIT(2) 395#define RX_DMA_STATUS_ERROR BIT(2)
351 396
352#define RX_DMA_STATUS_L3_IDENT BIT(4) 397#define RX_DMA_STATUS_L3I BIT(4)
353#define RX_DMA_STATUS_L4_IDENT BIT(5) 398#define RX_DMA_STATUS_L4I BIT(5)
354#define RX_DMA_STATUS_PHY_INFO BIT(6) 399#define RX_DMA_STATUS_PHY_INFO BIT(6)
355 400
356struct vring_rx_dma { 401struct vring_rx_dma {
357 u32 d0; 402 u32 d0;
@@ -423,6 +468,11 @@ static inline int wil_rxdesc_mcs(struct vring_rx_desc *d)
423 return WIL_GET_BITS(d->mac.d1, 21, 24); 468 return WIL_GET_BITS(d->mac.d1, 21, 24);
424} 469}
425 470
471static inline int wil_rxdesc_mcast(struct vring_rx_desc *d)
472{
473 return WIL_GET_BITS(d->mac.d1, 13, 14);
474}
475
426static inline int wil_rxdesc_phy_length(struct vring_rx_desc *d) 476static inline int wil_rxdesc_phy_length(struct vring_rx_desc *d)
427{ 477{
428 return WIL_GET_BITS(d->dma.d0, 16, 29); 478 return WIL_GET_BITS(d->dma.d0, 16, 29);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index c6ec5b99ac7d..94611568fc9a 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. 2 * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -25,19 +25,14 @@
25 25
26extern bool no_fw_recovery; 26extern bool no_fw_recovery;
27extern unsigned int mtu_max; 27extern unsigned int mtu_max;
28extern unsigned short rx_ring_overflow_thrsh;
29extern int agg_wsize;
28 30
29#define WIL_NAME "wil6210" 31#define WIL_NAME "wil6210"
30#define WIL_FW_NAME "wil6210.fw" 32#define WIL_FW_NAME "wil6210.fw"
31 33
32#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */ 34#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
33 35
34struct wil_board {
35 int board;
36#define WIL_BOARD_MARLON (1)
37#define WIL_BOARD_SPARROW (2)
38 const char * const name;
39};
40
41/** 36/**
42 * extract bits [@b0:@b1] (inclusive) from the value @x 37 * extract bits [@b0:@b1] (inclusive) from the value @x
43 * it should be @b0 <= @b1, or result is incorrect 38 * it should be @b0 <= @b1, or result is incorrect
@@ -49,21 +44,50 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
49 44
50#define WIL6210_MEM_SIZE (2*1024*1024UL) 45#define WIL6210_MEM_SIZE (2*1024*1024UL)
51 46
52#define WIL_RX_RING_SIZE_ORDER_DEFAULT (9) 47#define WIL_TX_Q_LEN_DEFAULT (4000)
53#define WIL_TX_RING_SIZE_ORDER_DEFAULT (9) 48#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
49#define WIL_TX_RING_SIZE_ORDER_DEFAULT (10)
54/* limit ring size in range [32..32k] */ 50/* limit ring size in range [32..32k] */
55#define WIL_RING_SIZE_ORDER_MIN (5) 51#define WIL_RING_SIZE_ORDER_MIN (5)
56#define WIL_RING_SIZE_ORDER_MAX (15) 52#define WIL_RING_SIZE_ORDER_MAX (15)
57#define WIL6210_MAX_TX_RINGS (24) /* HW limit */ 53#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
58#define WIL6210_MAX_CID (8) /* HW limit */ 54#define WIL6210_MAX_CID (8) /* HW limit */
59#define WIL6210_NAPI_BUDGET (16) /* arbitrary */ 55#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
56#define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */
57#define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */
58/* Hardware offload block adds the following:
59 * 26 bytes - 3-address QoS data header
60 * 8 bytes - IV + EIV (for GCMP)
61 * 8 bytes - SNAP
62 * 16 bytes - MIC (for GCMP)
63 * 4 bytes - CRC
64 */
65#define WIL_MAX_MPDU_OVERHEAD (62)
66
67/* Calculate MAC buffer size for the firmware. It includes all overhead,
68 * as it will go over the air, and need to be 8 byte aligned
69 */
70static inline u32 wil_mtu2macbuf(u32 mtu)
71{
72 return ALIGN(mtu + WIL_MAX_MPDU_OVERHEAD, 8);
73}
74
75/* MTU for Ethernet need to take into account 8-byte SNAP header
76 * to be added when encapsulating Ethernet frame into 802.11
77 */
78#define WIL_MAX_ETH_MTU (IEEE80211_MAX_DATA_LEN_DMG - 8)
60/* Max supported by wil6210 value for interrupt threshold is 5sec. */ 79/* Max supported by wil6210 value for interrupt threshold is 5sec. */
61#define WIL6210_ITR_TRSH_MAX (5000000) 80#define WIL6210_ITR_TRSH_MAX (5000000)
62#define WIL6210_ITR_TRSH_DEFAULT (300) /* usec */ 81#define WIL6210_ITR_TX_INTERFRAME_TIMEOUT_DEFAULT (13) /* usec */
82#define WIL6210_ITR_RX_INTERFRAME_TIMEOUT_DEFAULT (13) /* usec */
83#define WIL6210_ITR_TX_MAX_BURST_DURATION_DEFAULT (500) /* usec */
84#define WIL6210_ITR_RX_MAX_BURST_DURATION_DEFAULT (500) /* usec */
63#define WIL6210_FW_RECOVERY_RETRIES (5) /* try to recover this many times */ 85#define WIL6210_FW_RECOVERY_RETRIES (5) /* try to recover this many times */
64#define WIL6210_FW_RECOVERY_TO msecs_to_jiffies(5000) 86#define WIL6210_FW_RECOVERY_TO msecs_to_jiffies(5000)
65#define WIL6210_SCAN_TO msecs_to_jiffies(10000) 87#define WIL6210_SCAN_TO msecs_to_jiffies(10000)
66 88#define WIL6210_RX_HIGH_TRSH_INIT (0)
89#define WIL6210_RX_HIGH_TRSH_DEFAULT \
90 (1 << (WIL_RX_RING_SIZE_ORDER_DEFAULT - 3))
67/* Hardware definitions begin */ 91/* Hardware definitions begin */
68 92
69/* 93/*
@@ -135,7 +159,7 @@ struct RGF_ICR {
135 #define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1) 159 #define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1)
136 #define BIT_DMA_EP_MISC_ICR_FW_INT(n) BIT(28+n) /* n = [0..3] */ 160 #define BIT_DMA_EP_MISC_ICR_FW_INT(n) BIT(28+n) /* n = [0..3] */
137 161
138/* Interrupt moderation control */ 162/* Legacy interrupt moderation control (before Sparrow v2)*/
139#define RGF_DMA_ITR_CNT_TRSH (0x881c5c) 163#define RGF_DMA_ITR_CNT_TRSH (0x881c5c)
140#define RGF_DMA_ITR_CNT_DATA (0x881c60) 164#define RGF_DMA_ITR_CNT_DATA (0x881c60)
141#define RGF_DMA_ITR_CNT_CRL (0x881c64) 165#define RGF_DMA_ITR_CNT_CRL (0x881c64)
@@ -145,6 +169,46 @@ struct RGF_ICR {
145 #define BIT_DMA_ITR_CNT_CRL_CLR BIT(3) 169 #define BIT_DMA_ITR_CNT_CRL_CLR BIT(3)
146 #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4) 170 #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH BIT(4)
147 171
172/* New (sparrow v2+) interrupt moderation control */
173#define RGF_DMA_ITR_TX_DESQ_NO_MOD (0x881d40)
174#define RGF_DMA_ITR_TX_CNT_TRSH (0x881d34)
175#define RGF_DMA_ITR_TX_CNT_DATA (0x881d38)
176#define RGF_DMA_ITR_TX_CNT_CTL (0x881d3c)
177 #define BIT_DMA_ITR_TX_CNT_CTL_EN BIT(0)
178 #define BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL BIT(1)
179 #define BIT_DMA_ITR_TX_CNT_CTL_FOREVER BIT(2)
180 #define BIT_DMA_ITR_TX_CNT_CTL_CLR BIT(3)
181 #define BIT_DMA_ITR_TX_CNT_CTL_REACHED_TRESH BIT(4)
182 #define BIT_DMA_ITR_TX_CNT_CTL_CROSS_EN BIT(5)
183 #define BIT_DMA_ITR_TX_CNT_CTL_FREE_RUNNIG BIT(6)
184#define RGF_DMA_ITR_TX_IDL_CNT_TRSH (0x881d60)
185#define RGF_DMA_ITR_TX_IDL_CNT_DATA (0x881d64)
186#define RGF_DMA_ITR_TX_IDL_CNT_CTL (0x881d68)
187 #define BIT_DMA_ITR_TX_IDL_CNT_CTL_EN BIT(0)
188 #define BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL BIT(1)
189 #define BIT_DMA_ITR_TX_IDL_CNT_CTL_FOREVER BIT(2)
190 #define BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR BIT(3)
191 #define BIT_DMA_ITR_TX_IDL_CNT_CTL_REACHED_TRESH BIT(4)
192#define RGF_DMA_ITR_RX_DESQ_NO_MOD (0x881d50)
193#define RGF_DMA_ITR_RX_CNT_TRSH (0x881d44)
194#define RGF_DMA_ITR_RX_CNT_DATA (0x881d48)
195#define RGF_DMA_ITR_RX_CNT_CTL (0x881d4c)
196 #define BIT_DMA_ITR_RX_CNT_CTL_EN BIT(0)
197 #define BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL BIT(1)
198 #define BIT_DMA_ITR_RX_CNT_CTL_FOREVER BIT(2)
199 #define BIT_DMA_ITR_RX_CNT_CTL_CLR BIT(3)
200 #define BIT_DMA_ITR_RX_CNT_CTL_REACHED_TRESH BIT(4)
201 #define BIT_DMA_ITR_RX_CNT_CTL_CROSS_EN BIT(5)
202 #define BIT_DMA_ITR_RX_CNT_CTL_FREE_RUNNIG BIT(6)
203#define RGF_DMA_ITR_RX_IDL_CNT_TRSH (0x881d54)
204#define RGF_DMA_ITR_RX_IDL_CNT_DATA (0x881d58)
205#define RGF_DMA_ITR_RX_IDL_CNT_CTL (0x881d5c)
206 #define BIT_DMA_ITR_RX_IDL_CNT_CTL_EN BIT(0)
207 #define BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL BIT(1)
208 #define BIT_DMA_ITR_RX_IDL_CNT_CTL_FOREVER BIT(2)
209 #define BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR BIT(3)
210 #define BIT_DMA_ITR_RX_IDL_CNT_CTL_REACHED_TRESH BIT(4)
211
148#define RGF_DMA_PSEUDO_CAUSE (0x881c68) 212#define RGF_DMA_PSEUDO_CAUSE (0x881c68)
149#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c) 213#define RGF_DMA_PSEUDO_CAUSE_MASK_SW (0x881c6c)
150#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70) 214#define RGF_DMA_PSEUDO_CAUSE_MASK_FW (0x881c70)
@@ -164,6 +228,20 @@ struct RGF_ICR {
164#define RGF_CAF_PLL_LOCK_STATUS (0x88afec) 228#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
165 #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0) 229 #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
166 230
231#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
232 #define JTAG_DEV_ID_MARLON_B0 (0x0612072f)
233 #define JTAG_DEV_ID_SPARROW_A0 (0x0632072f)
234 #define JTAG_DEV_ID_SPARROW_A1 (0x1632072f)
235 #define JTAG_DEV_ID_SPARROW_B0 (0x2632072f)
236
237enum {
238 HW_VER_UNKNOWN,
239 HW_VER_MARLON_B0, /* JTAG_DEV_ID_MARLON_B0 */
240 HW_VER_SPARROW_A0, /* JTAG_DEV_ID_SPARROW_A0 */
241 HW_VER_SPARROW_A1, /* JTAG_DEV_ID_SPARROW_A1 */
242 HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */
243};
244
167/* popular locations */ 245/* popular locations */
168#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD) 246#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
169#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \ 247#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
@@ -303,6 +381,11 @@ struct vring {
303struct vring_tx_data { 381struct vring_tx_data {
304 int enabled; 382 int enabled;
305 cycles_t idle, last_idle, begin; 383 cycles_t idle, last_idle, begin;
384 u8 agg_wsize; /* agreed aggregation window, 0 - no agg */
385 u16 agg_timeout;
386 u8 agg_amsdu;
387 bool addba_in_progress; /* if set, agg_xxx is for request in progress */
388 spinlock_t lock;
306}; 389};
307 390
308enum { /* for wil6210_priv.status */ 391enum { /* for wil6210_priv.status */
@@ -313,6 +396,7 @@ enum { /* for wil6210_priv.status */
313 wil_status_reset_done, 396 wil_status_reset_done,
314 wil_status_irqen, /* FIXME: interrupts enabled - for debug */ 397 wil_status_irqen, /* FIXME: interrupts enabled - for debug */
315 wil_status_napi_en, /* NAPI enabled protected by wil->mutex */ 398 wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
399 wil_status_last /* keep last */
316}; 400};
317 401
318struct pci_dev; 402struct pci_dev;
@@ -397,15 +481,46 @@ enum {
397 fw_recovery_running = 2, 481 fw_recovery_running = 2,
398}; 482};
399 483
484enum {
485 hw_capability_reset_v2 = 0,
486 hw_capability_advanced_itr_moderation = 1,
487 hw_capability_last
488};
489
490struct wil_back_rx {
491 struct list_head list;
492 /* request params, converted to CPU byte order - what we asked for */
493 u8 cidxtid;
494 u8 dialog_token;
495 u16 ba_param_set;
496 u16 ba_timeout;
497 u16 ba_seq_ctrl;
498};
499
500struct wil_back_tx {
501 struct list_head list;
502 /* request params, converted to CPU byte order - what we asked for */
503 u8 ringid;
504 u8 agg_wsize;
505 u16 agg_timeout;
506};
507
508struct wil_probe_client_req {
509 struct list_head list;
510 u64 cookie;
511 u8 cid;
512};
513
400struct wil6210_priv { 514struct wil6210_priv {
401 struct pci_dev *pdev; 515 struct pci_dev *pdev;
402 int n_msi; 516 int n_msi;
403 struct wireless_dev *wdev; 517 struct wireless_dev *wdev;
404 void __iomem *csr; 518 void __iomem *csr;
405 ulong status; 519 DECLARE_BITMAP(status, wil_status_last);
406 u32 fw_version; 520 u32 fw_version;
407 u32 hw_version; 521 u32 hw_version;
408 struct wil_board *board; 522 const char *hw_name;
523 DECLARE_BITMAP(hw_capabilities, hw_capability_last);
409 u8 n_mids; /* number of additional MIDs as reported by FW */ 524 u8 n_mids; /* number of additional MIDs as reported by FW */
410 u32 recovery_count; /* num of FW recovery attempts in a short time */ 525 u32 recovery_count; /* num of FW recovery attempts in a short time */
411 u32 recovery_state; /* FW recovery state machine */ 526 u32 recovery_state; /* FW recovery state machine */
@@ -415,7 +530,11 @@ struct wil6210_priv {
415 u32 monitor_flags; 530 u32 monitor_flags;
416 u32 secure_pcp; /* create secure PCP? */ 531 u32 secure_pcp; /* create secure PCP? */
417 int sinfo_gen; 532 int sinfo_gen;
418 u32 itr_trsh; 533 /* interrupt moderation */
534 u32 tx_max_burst_duration;
535 u32 tx_interframe_timeout;
536 u32 rx_max_burst_duration;
537 u32 rx_interframe_timeout;
419 /* cached ISR registers */ 538 /* cached ISR registers */
420 u32 isr_misc; 539 u32 isr_misc;
421 /* mailbox related */ 540 /* mailbox related */
@@ -429,7 +548,7 @@ struct wil6210_priv {
429 u16 reply_size; 548 u16 reply_size;
430 struct workqueue_struct *wmi_wq; /* for deferred calls */ 549 struct workqueue_struct *wmi_wq; /* for deferred calls */
431 struct work_struct wmi_event_worker; 550 struct work_struct wmi_event_worker;
432 struct workqueue_struct *wmi_wq_conn; /* for connect worker */ 551 struct workqueue_struct *wq_service;
433 struct work_struct connect_worker; 552 struct work_struct connect_worker;
434 struct work_struct disconnect_worker; 553 struct work_struct disconnect_worker;
435 struct work_struct fw_error_worker; /* for FW error recovery */ 554 struct work_struct fw_error_worker; /* for FW error recovery */
@@ -445,6 +564,17 @@ struct wil6210_priv {
445 spinlock_t wmi_ev_lock; 564 spinlock_t wmi_ev_lock;
446 struct napi_struct napi_rx; 565 struct napi_struct napi_rx;
447 struct napi_struct napi_tx; 566 struct napi_struct napi_tx;
567 /* BACK */
568 struct list_head back_rx_pending;
569 struct mutex back_rx_mutex; /* protect @back_rx_pending */
570 struct work_struct back_rx_worker;
571 struct list_head back_tx_pending;
572 struct mutex back_tx_mutex; /* protect @back_tx_pending */
573 struct work_struct back_tx_worker;
574 /* keep alive */
575 struct list_head probe_client_pending;
576 struct mutex probe_client_mutex; /* protect @probe_client_pending */
577 struct work_struct probe_client_worker;
448 /* DMA related */ 578 /* DMA related */
449 struct vring vring_rx; 579 struct vring vring_rx;
450 struct vring vring_tx[WIL6210_MAX_TX_RINGS]; 580 struct vring vring_tx[WIL6210_MAX_TX_RINGS];
@@ -529,11 +659,8 @@ void wil_if_remove(struct wil6210_priv *wil);
529int wil_priv_init(struct wil6210_priv *wil); 659int wil_priv_init(struct wil6210_priv *wil);
530void wil_priv_deinit(struct wil6210_priv *wil); 660void wil_priv_deinit(struct wil6210_priv *wil);
531int wil_reset(struct wil6210_priv *wil); 661int wil_reset(struct wil6210_priv *wil);
532void wil_set_itr_trsh(struct wil6210_priv *wil);
533void wil_fw_error_recovery(struct wil6210_priv *wil); 662void wil_fw_error_recovery(struct wil6210_priv *wil);
534void wil_set_recovery_state(struct wil6210_priv *wil, int state); 663void wil_set_recovery_state(struct wil6210_priv *wil, int state);
535void wil_link_on(struct wil6210_priv *wil);
536void wil_link_off(struct wil6210_priv *wil);
537int wil_up(struct wil6210_priv *wil); 664int wil_up(struct wil6210_priv *wil);
538int __wil_up(struct wil6210_priv *wil); 665int __wil_up(struct wil6210_priv *wil);
539int wil_down(struct wil6210_priv *wil); 666int wil_down(struct wil6210_priv *wil);
@@ -567,12 +694,26 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
567int wmi_rxon(struct wil6210_priv *wil, bool on); 694int wmi_rxon(struct wil6210_priv *wil, bool on);
568int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r); 695int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
569int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason); 696int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason);
697int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout);
698int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason);
699int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason);
700int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
701 u16 status, bool amsdu, u16 agg_wsize, u16 timeout);
702int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
703 u8 dialog_token, __le16 ba_param_set,
704 __le16 ba_timeout, __le16 ba_seq_ctrl);
705void wil_back_rx_worker(struct work_struct *work);
706void wil_back_rx_flush(struct wil6210_priv *wil);
707int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize);
708void wil_back_tx_worker(struct work_struct *work);
709void wil_back_tx_flush(struct wil6210_priv *wil);
570 710
571void wil6210_clear_irq(struct wil6210_priv *wil); 711void wil6210_clear_irq(struct wil6210_priv *wil);
572int wil6210_init_irq(struct wil6210_priv *wil, int irq); 712int wil6210_init_irq(struct wil6210_priv *wil, int irq);
573void wil6210_fini_irq(struct wil6210_priv *wil, int irq); 713void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
574void wil_mask_irq(struct wil6210_priv *wil); 714void wil_mask_irq(struct wil6210_priv *wil);
575void wil_unmask_irq(struct wil6210_priv *wil); 715void wil_unmask_irq(struct wil6210_priv *wil);
716void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
576void wil_disable_irq(struct wil6210_priv *wil); 717void wil_disable_irq(struct wil6210_priv *wil);
577void wil_enable_irq(struct wil6210_priv *wil); 718void wil_enable_irq(struct wil6210_priv *wil);
578int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, 719int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
@@ -592,6 +733,8 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan);
592int wmi_pcp_stop(struct wil6210_priv *wil); 733int wmi_pcp_stop(struct wil6210_priv *wil);
593void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, 734void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
594 u16 reason_code, bool from_event); 735 u16 reason_code, bool from_event);
736void wil_probe_client_flush(struct wil6210_priv *wil);
737void wil_probe_client_worker(struct work_struct *work);
595 738
596int wil_rx_init(struct wil6210_priv *wil, u16 size); 739int wil_rx_init(struct wil6210_priv *wil, u16 size);
597void wil_rx_fini(struct wil6210_priv *wil); 740void wil_rx_fini(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c
index 8f1d78f8a74d..976a071ba74e 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.c
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.c
@@ -17,10 +17,6 @@
17#include "linux/device.h" 17#include "linux/device.h"
18#include "wil_platform.h" 18#include "wil_platform.h"
19 19
20#ifdef CONFIG_WIL6210_PLATFORM_MSM
21#include "wil_platform_msm.h"
22#endif
23
24/** 20/**
25 * wil_platform_init() - wil6210 platform module init 21 * wil_platform_init() - wil6210 platform module init
26 * 22 *
@@ -37,13 +33,7 @@ void *wil_platform_init(struct device *dev, struct wil_platform_ops *ops)
37 return NULL; 33 return NULL;
38 } 34 }
39 35
40#ifdef CONFIG_WIL6210_PLATFORM_MSM 36 /* platform specific init functions should be called here */
41 handle = wil_platform_msm_init(dev, ops);
42 if (handle)
43 return handle;
44#endif
45
46 /* other platform specific init functions should be called here */
47 37
48 return handle; 38 return handle;
49} 39}
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform_msm.c b/drivers/net/wireless/ath/wil6210/wil_platform_msm.c
deleted file mode 100644
index b354a743240d..000000000000
--- a/drivers/net/wireless/ath/wil6210/wil_platform_msm.c
+++ /dev/null
@@ -1,257 +0,0 @@
1/*
2 * Copyright (c) 2014 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/of.h>
18#include <linux/slab.h>
19#include <linux/msm-bus.h>
20
21#include "wil_platform.h"
22#include "wil_platform_msm.h"
23
24/**
25 * struct wil_platform_msm - wil6210 msm platform module info
26 *
27 * @dev: device object
28 * @msm_bus_handle: handle for using msm_bus API
29 * @pdata: bus scale info retrieved from DT
30 */
31struct wil_platform_msm {
32 struct device *dev;
33 uint32_t msm_bus_handle;
34 struct msm_bus_scale_pdata *pdata;
35};
36
37#define KBTOB(a) (a * 1000ULL)
38
39/**
40 * wil_platform_get_pdata() - Generate bus client data from device tree
41 * provided by clients.
42 *
43 * dev: device object
44 * of_node: Device tree node to extract information from
45 *
46 * The function returns a valid pointer to the allocated bus-scale-pdata
47 * if the vectors were correctly read from the client's device node.
48 * Any error in reading or parsing the device node will return NULL
49 * to the caller.
50 */
51static struct msm_bus_scale_pdata *wil_platform_get_pdata(
52 struct device *dev,
53 struct device_node *of_node)
54{
55 struct msm_bus_scale_pdata *pdata;
56 struct msm_bus_paths *usecase;
57 int i, j, ret, len;
58 unsigned int num_usecases, num_paths, mem_size;
59 const uint32_t *vec_arr;
60 struct msm_bus_vectors *vectors;
61
62 /* first read num_usecases and num_paths so we can calculate
63 * amount of memory to allocate
64 */
65 ret = of_property_read_u32(of_node, "qcom,msm-bus,num-cases",
66 &num_usecases);
67 if (ret) {
68 dev_err(dev, "Error: num-usecases not found\n");
69 return NULL;
70 }
71
72 ret = of_property_read_u32(of_node, "qcom,msm-bus,num-paths",
73 &num_paths);
74 if (ret) {
75 dev_err(dev, "Error: num_paths not found\n");
76 return NULL;
77 }
78
79 /* pdata memory layout:
80 * msm_bus_scale_pdata
81 * msm_bus_paths[num_usecases]
82 * msm_bus_vectors[num_usecases][num_paths]
83 */
84 mem_size = sizeof(struct msm_bus_scale_pdata) +
85 sizeof(struct msm_bus_paths) * num_usecases +
86 sizeof(struct msm_bus_vectors) * num_usecases * num_paths;
87
88 pdata = kzalloc(mem_size, GFP_KERNEL);
89 if (!pdata)
90 return NULL;
91
92 ret = of_property_read_string(of_node, "qcom,msm-bus,name",
93 &pdata->name);
94 if (ret) {
95 dev_err(dev, "Error: Client name not found\n");
96 goto err;
97 }
98
99 if (of_property_read_bool(of_node, "qcom,msm-bus,active-only")) {
100 pdata->active_only = 1;
101 } else {
102 dev_info(dev, "active_only flag absent.\n");
103 dev_info(dev, "Using dual context by default\n");
104 }
105
106 pdata->num_usecases = num_usecases;
107 pdata->usecase = (struct msm_bus_paths *)(pdata + 1);
108
109 vec_arr = of_get_property(of_node, "qcom,msm-bus,vectors-KBps", &len);
110 if (vec_arr == NULL) {
111 dev_err(dev, "Error: Vector array not found\n");
112 goto err;
113 }
114
115 if (len != num_usecases * num_paths * sizeof(uint32_t) * 4) {
116 dev_err(dev, "Error: Length-error on getting vectors\n");
117 goto err;
118 }
119
120 vectors = (struct msm_bus_vectors *)(pdata->usecase + num_usecases);
121 for (i = 0; i < num_usecases; i++) {
122 usecase = &pdata->usecase[i];
123 usecase->num_paths = num_paths;
124 usecase->vectors = &vectors[i];
125
126 for (j = 0; j < num_paths; j++) {
127 int index = ((i * num_paths) + j) * 4;
128
129 usecase->vectors[j].src = be32_to_cpu(vec_arr[index]);
130 usecase->vectors[j].dst =
131 be32_to_cpu(vec_arr[index + 1]);
132 usecase->vectors[j].ab = (uint64_t)
133 KBTOB(be32_to_cpu(vec_arr[index + 2]));
134 usecase->vectors[j].ib = (uint64_t)
135 KBTOB(be32_to_cpu(vec_arr[index + 3]));
136 }
137 }
138
139 return pdata;
140
141err:
142 kfree(pdata);
143
144 return NULL;
145}
146
147/* wil_platform API (callbacks) */
148
149static int wil_platform_bus_request(void *handle,
150 uint32_t kbps /* KBytes/Sec */)
151{
152 int rc, i;
153 struct wil_platform_msm *msm = (struct wil_platform_msm *)handle;
154 int vote = 0; /* vote 0 in case requested kbps cannot be satisfied */
155 struct msm_bus_paths *usecase;
156 uint32_t usecase_kbps;
157 uint32_t min_kbps = ~0;
158
159 /* find the lowest usecase that is bigger than requested kbps */
160 for (i = 0; i < msm->pdata->num_usecases; i++) {
161 usecase = &msm->pdata->usecase[i];
162 /* assume we have single path (vectors[0]). If we ever
163 * have multiple paths, need to define the behavior */
164 usecase_kbps = div64_u64(usecase->vectors[0].ib, 1000);
165 if (usecase_kbps >= kbps && usecase_kbps < min_kbps) {
166 min_kbps = usecase_kbps;
167 vote = i;
168 }
169 }
170
171 rc = msm_bus_scale_client_update_request(msm->msm_bus_handle, vote);
172 if (rc)
173 dev_err(msm->dev, "Failed msm_bus voting. kbps=%d vote=%d, rc=%d\n",
174 kbps, vote, rc);
175 else
176 /* TOOD: remove */
177 dev_info(msm->dev, "msm_bus_scale_client_update_request succeeded. kbps=%d vote=%d\n",
178 kbps, vote);
179
180 return rc;
181}
182
183static void wil_platform_uninit(void *handle)
184{
185 struct wil_platform_msm *msm = (struct wil_platform_msm *)handle;
186
187 dev_info(msm->dev, "wil_platform_uninit\n");
188
189 if (msm->msm_bus_handle)
190 msm_bus_scale_unregister_client(msm->msm_bus_handle);
191
192 kfree(msm->pdata);
193 kfree(msm);
194}
195
196static int wil_platform_msm_bus_register(struct wil_platform_msm *msm,
197 struct device_node *node)
198{
199 msm->pdata = wil_platform_get_pdata(msm->dev, node);
200 if (!msm->pdata) {
201 dev_err(msm->dev, "Failed getting DT info\n");
202 return -EINVAL;
203 }
204
205 msm->msm_bus_handle = msm_bus_scale_register_client(msm->pdata);
206 if (!msm->msm_bus_handle) {
207 dev_err(msm->dev, "Failed msm_bus registration\n");
208 return -EINVAL;
209 }
210
211 dev_info(msm->dev, "msm_bus registration succeeded! handle 0x%x\n",
212 msm->msm_bus_handle);
213
214 return 0;
215}
216
217/**
218 * wil_platform_msm_init() - wil6210 msm platform module init
219 *
220 * The function must be called before all other functions in this module.
221 * It returns a handle which is used with the rest of the API
222 *
223 */
224void *wil_platform_msm_init(struct device *dev, struct wil_platform_ops *ops)
225{
226 struct device_node *of_node;
227 struct wil_platform_msm *msm;
228 int rc;
229
230 of_node = of_find_compatible_node(NULL, NULL, "qcom,wil6210");
231 if (!of_node) {
232 /* this could mean non-msm platform */
233 dev_err(dev, "DT node not found\n");
234 return NULL;
235 }
236
237 msm = kzalloc(sizeof(*msm), GFP_KERNEL);
238 if (!msm)
239 return NULL;
240
241 msm->dev = dev;
242
243 /* register with msm_bus module for scaling requests */
244 rc = wil_platform_msm_bus_register(msm, of_node);
245 if (rc)
246 goto cleanup;
247
248 memset(ops, 0, sizeof(*ops));
249 ops->bus_request = wil_platform_bus_request;
250 ops->uninit = wil_platform_uninit;
251
252 return (void *)msm;
253
254cleanup:
255 kfree(msm);
256 return NULL;
257}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 63476c86cd0e..0f3e4334c8e3 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -23,10 +23,15 @@
23#include "wmi.h" 23#include "wmi.h"
24#include "trace.h" 24#include "trace.h"
25 25
26static uint max_assoc_sta = 1; 26static uint max_assoc_sta = WIL6210_MAX_CID;
27module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR); 27module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR);
28MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP"); 28MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP");
29 29
30int agg_wsize; /* = 0; */
31module_param(agg_wsize, int, S_IRUGO | S_IWUSR);
32MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;"
33 " 0 - use default; < 0 - don't auto-establish");
34
30/** 35/**
31 * WMI event receiving - theory of operations 36 * WMI event receiving - theory of operations
32 * 37 *
@@ -197,7 +202,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
197 202
198 might_sleep(); 203 might_sleep();
199 204
200 if (!test_bit(wil_status_fwready, &wil->status)) { 205 if (!test_bit(wil_status_fwready, wil->status)) {
201 wil_err(wil, "WMI: cannot send command while FW not ready\n"); 206 wil_err(wil, "WMI: cannot send command while FW not ready\n");
202 return -EAGAIN; 207 return -EAGAIN;
203 } 208 }
@@ -300,7 +305,7 @@ static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
300 wil_dbg_wmi(wil, "WMI: got FW ready event\n"); 305 wil_dbg_wmi(wil, "WMI: got FW ready event\n");
301 306
302 wil_set_recovery_state(wil, fw_recovery_idle); 307 wil_set_recovery_state(wil, fw_recovery_idle);
303 set_bit(wil_status_fwready, &wil->status); 308 set_bit(wil_status_fwready, wil->status);
304 /* let the reset sequence continue */ 309 /* let the reset sequence continue */
305 complete(&wil->wmi_ready); 310 complete(&wil->wmi_ready);
306} 311}
@@ -438,7 +443,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
438 443
439 if ((wdev->iftype == NL80211_IFTYPE_STATION) || 444 if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
440 (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { 445 (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
441 if (!test_bit(wil_status_fwconnecting, &wil->status)) { 446 if (!test_bit(wil_status_fwconnecting, wil->status)) {
442 wil_err(wil, "Not in connecting state\n"); 447 wil_err(wil, "Not in connecting state\n");
443 return; 448 return;
444 } 449 }
@@ -457,13 +462,12 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
457 if (assoc_req_ie) { 462 if (assoc_req_ie) {
458 sinfo.assoc_req_ies = assoc_req_ie; 463 sinfo.assoc_req_ies = assoc_req_ie;
459 sinfo.assoc_req_ies_len = assoc_req_ielen; 464 sinfo.assoc_req_ies_len = assoc_req_ielen;
460 sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
461 } 465 }
462 466
463 cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL); 467 cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
464 } 468 }
465 clear_bit(wil_status_fwconnecting, &wil->status); 469 clear_bit(wil_status_fwconnecting, wil->status);
466 set_bit(wil_status_fwconnected, &wil->status); 470 set_bit(wil_status_fwconnected, wil->status);
467 471
468 /* FIXME FW can transmit only ucast frames to peer */ 472 /* FIXME FW can transmit only ucast frames to peer */
469 /* FIXME real ring_id instead of hard coded 0 */ 473 /* FIXME real ring_id instead of hard coded 0 */
@@ -471,7 +475,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
471 wil->sta[evt->cid].status = wil_sta_conn_pending; 475 wil->sta[evt->cid].status = wil_sta_conn_pending;
472 476
473 wil->pending_connect_cid = evt->cid; 477 wil->pending_connect_cid = evt->cid;
474 queue_work(wil->wmi_wq_conn, &wil->connect_worker); 478 queue_work(wil->wq_service, &wil->connect_worker);
475} 479}
476 480
477static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, 481static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
@@ -544,9 +548,24 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
544 } 548 }
545} 549}
546 550
551static void wil_addba_tx_cid(struct wil6210_priv *wil, u8 cid, u16 wsize)
552{
553 struct vring_tx_data *t;
554 int i;
555
556 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
557 if (cid != wil->vring2cid_tid[i][0])
558 continue;
559 t = &wil->vring_tx_data[i];
560 if (!t->enabled)
561 continue;
562
563 wil_addba_tx_request(wil, i, wsize);
564 }
565}
566
547static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len) 567static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
548{ 568{
549 struct net_device *ndev = wil_to_ndev(wil);
550 struct wmi_data_port_open_event *evt = d; 569 struct wmi_data_port_open_event *evt = d;
551 u8 cid = evt->cid; 570 u8 cid = evt->cid;
552 571
@@ -558,7 +577,8 @@ static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
558 } 577 }
559 578
560 wil->sta[cid].data_port_open = true; 579 wil->sta[cid].data_port_open = true;
561 netif_carrier_on(ndev); 580 if (agg_wsize >= 0)
581 wil_addba_tx_cid(wil, cid, agg_wsize);
562} 582}
563 583
564static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len) 584static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
@@ -583,55 +603,89 @@ static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
583 int len) 603 int len)
584{ 604{
585 struct wmi_vring_ba_status_event *evt = d; 605 struct wmi_vring_ba_status_event *evt = d;
586 struct wil_sta_info *sta; 606 struct vring_tx_data *txdata;
587 uint i, cid;
588 607
589 /* TODO: use Rx BA status, not Tx one */ 608 wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
590
591 wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n",
592 evt->ringid, 609 evt->ringid,
593 evt->status == WMI_BA_AGREED ? "OK" : "N/A", 610 evt->status == WMI_BA_AGREED ? "OK" : "N/A",
594 evt->agg_wsize, __le16_to_cpu(evt->ba_timeout)); 611 evt->agg_wsize, __le16_to_cpu(evt->ba_timeout),
612 evt->amsdu ? "+" : "-");
595 613
596 if (evt->ringid >= WIL6210_MAX_TX_RINGS) { 614 if (evt->ringid >= WIL6210_MAX_TX_RINGS) {
597 wil_err(wil, "invalid ring id %d\n", evt->ringid); 615 wil_err(wil, "invalid ring id %d\n", evt->ringid);
598 return; 616 return;
599 } 617 }
600 618
601 mutex_lock(&wil->mutex); 619 if (evt->status != WMI_BA_AGREED) {
602 620 evt->ba_timeout = 0;
603 cid = wil->vring2cid_tid[evt->ringid][0]; 621 evt->agg_wsize = 0;
604 if (cid >= WIL6210_MAX_CID) { 622 evt->amsdu = 0;
605 wil_err(wil, "invalid CID %d for vring %d\n", cid, evt->ringid);
606 goto out;
607 } 623 }
608 624
609 sta = &wil->sta[cid]; 625 txdata = &wil->vring_tx_data[evt->ringid];
610 if (sta->status == wil_sta_unused) {
611 wil_err(wil, "CID %d unused\n", cid);
612 goto out;
613 }
614 626
615 wil_dbg_wmi(wil, "BACK for CID %d %pM\n", cid, sta->addr); 627 txdata->agg_timeout = le16_to_cpu(evt->ba_timeout);
616 for (i = 0; i < WIL_STA_TID_NUM; i++) { 628 txdata->agg_wsize = evt->agg_wsize;
617 struct wil_tid_ampdu_rx *r; 629 txdata->agg_amsdu = evt->amsdu;
618 unsigned long flags; 630 txdata->addba_in_progress = false;
631}
619 632
620 spin_lock_irqsave(&sta->tid_rx_lock, flags); 633static void wmi_evt_addba_rx_req(struct wil6210_priv *wil, int id, void *d,
634 int len)
635{
636 struct wmi_rcp_addba_req_event *evt = d;
621 637
622 r = sta->tid_rx[i]; 638 wil_addba_rx_request(wil, evt->cidxtid, evt->dialog_token,
623 sta->tid_rx[i] = NULL; 639 evt->ba_param_set, evt->ba_timeout,
624 wil_tid_ampdu_rx_free(wil, r); 640 evt->ba_seq_ctrl);
641}
625 642
626 spin_unlock_irqrestore(&sta->tid_rx_lock, flags); 643static void wmi_evt_delba(struct wil6210_priv *wil, int id, void *d, int len)
644__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
645{
646 struct wmi_delba_event *evt = d;
647 u8 cid, tid;
648 u16 reason = __le16_to_cpu(evt->reason);
649 struct wil_sta_info *sta;
650 struct wil_tid_ampdu_rx *r;
627 651
628 if ((evt->status == WMI_BA_AGREED) && evt->agg_wsize) 652 might_sleep();
629 sta->tid_rx[i] = wil_tid_ampdu_rx_alloc(wil, 653 parse_cidxtid(evt->cidxtid, &cid, &tid);
630 evt->agg_wsize, 0); 654 wil_dbg_wmi(wil, "DELBA CID %d TID %d from %s reason %d\n",
655 cid, tid,
656 evt->from_initiator ? "originator" : "recipient",
657 reason);
658 if (!evt->from_initiator) {
659 int i;
660 /* find Tx vring it belongs to */
661 for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
662 if ((wil->vring2cid_tid[i][0] == cid) &&
663 (wil->vring2cid_tid[i][1] == tid)) {
664 struct vring_tx_data *txdata =
665 &wil->vring_tx_data[i];
666
667 wil_dbg_wmi(wil, "DELBA Tx vring %d\n", i);
668 txdata->agg_timeout = 0;
669 txdata->agg_wsize = 0;
670 txdata->addba_in_progress = false;
671
672 break; /* max. 1 matching ring */
673 }
674 }
675 if (i >= ARRAY_SIZE(wil->vring2cid_tid))
676 wil_err(wil, "DELBA: unable to find Tx vring\n");
677 return;
631 } 678 }
632 679
633out: 680 sta = &wil->sta[cid];
634 mutex_unlock(&wil->mutex); 681
682 spin_lock_bh(&sta->tid_rx_lock);
683
684 r = sta->tid_rx[tid];
685 sta->tid_rx[tid] = NULL;
686 wil_tid_ampdu_rx_free(wil, r);
687
688 spin_unlock_bh(&sta->tid_rx_lock);
635} 689}
636 690
637static const struct { 691static const struct {
@@ -649,6 +703,8 @@ static const struct {
649 {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup}, 703 {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup},
650 {WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown}, 704 {WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown},
651 {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status}, 705 {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
706 {WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
707 {WMI_DELBA_EVENTID, wmi_evt_delba},
652}; 708};
653 709
654/* 710/*
@@ -668,7 +724,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
668 ulong flags; 724 ulong flags;
669 unsigned n; 725 unsigned n;
670 726
671 if (!test_bit(wil_status_reset_done, &wil->status)) { 727 if (!test_bit(wil_status_reset_done, wil->status)) {
672 wil_err(wil, "Reset in progress. Cannot handle WMI event\n"); 728 wil_err(wil, "Reset in progress. Cannot handle WMI event\n");
673 return; 729 return;
674 } 730 }
@@ -1025,13 +1081,14 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
1025 struct wmi_cfg_rx_chain_cmd cmd = { 1081 struct wmi_cfg_rx_chain_cmd cmd = {
1026 .action = WMI_RX_CHAIN_ADD, 1082 .action = WMI_RX_CHAIN_ADD,
1027 .rx_sw_ring = { 1083 .rx_sw_ring = {
1028 .max_mpdu_size = cpu_to_le16(mtu_max + ETH_HLEN), 1084 .max_mpdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
1029 .ring_mem_base = cpu_to_le64(vring->pa), 1085 .ring_mem_base = cpu_to_le64(vring->pa),
1030 .ring_size = cpu_to_le16(vring->size), 1086 .ring_size = cpu_to_le16(vring->size),
1031 }, 1087 },
1032 .mid = 0, /* TODO - what is it? */ 1088 .mid = 0, /* TODO - what is it? */
1033 .decap_trans_type = WMI_DECAP_TYPE_802_3, 1089 .decap_trans_type = WMI_DECAP_TYPE_802_3,
1034 .reorder_type = WMI_RX_SW_REORDER, 1090 .reorder_type = WMI_RX_SW_REORDER,
1091 .host_thrsh = cpu_to_le16(rx_ring_overflow_thrsh),
1035 }; 1092 };
1036 struct { 1093 struct {
1037 struct wil6210_mbox_hdr_wmi wmi; 1094 struct wil6210_mbox_hdr_wmi wmi;
@@ -1074,12 +1131,13 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
1074 return rc; 1131 return rc;
1075} 1132}
1076 1133
1077int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r) 1134int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
1078{ 1135{
1079 int rc; 1136 int rc;
1080 struct wmi_temp_sense_cmd cmd = { 1137 struct wmi_temp_sense_cmd cmd = {
1081 .measure_marlon_m_en = cpu_to_le32(!!t_m), 1138 .measure_baseband_en = cpu_to_le32(!!t_bb),
1082 .measure_marlon_r_en = cpu_to_le32(!!t_r), 1139 .measure_rf_en = cpu_to_le32(!!t_rf),
1140 .measure_mode = cpu_to_le32(TEMPERATURE_MEASURE_NOW),
1083 }; 1141 };
1084 struct { 1142 struct {
1085 struct wil6210_mbox_hdr_wmi wmi; 1143 struct wil6210_mbox_hdr_wmi wmi;
@@ -1091,10 +1149,10 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r)
1091 if (rc) 1149 if (rc)
1092 return rc; 1150 return rc;
1093 1151
1094 if (t_m) 1152 if (t_bb)
1095 *t_m = le32_to_cpu(reply.evt.marlon_m_t1000); 1153 *t_bb = le32_to_cpu(reply.evt.baseband_t1000);
1096 if (t_r) 1154 if (t_rf)
1097 *t_r = le32_to_cpu(reply.evt.marlon_r_t1000); 1155 *t_rf = le32_to_cpu(reply.evt.rf_t1000);
1098 1156
1099 return 0; 1157 return 0;
1100} 1158}
@@ -1111,6 +1169,87 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
1111 return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd)); 1169 return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
1112} 1170}
1113 1171
1172int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
1173{
1174 struct wmi_vring_ba_en_cmd cmd = {
1175 .ringid = ringid,
1176 .agg_max_wsize = size,
1177 .ba_timeout = cpu_to_le16(timeout),
1178 .amsdu = 0,
1179 };
1180
1181 wil_dbg_wmi(wil, "%s(ring %d size %d timeout %d)\n", __func__,
1182 ringid, size, timeout);
1183
1184 return wmi_send(wil, WMI_VRING_BA_EN_CMDID, &cmd, sizeof(cmd));
1185}
1186
1187int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason)
1188{
1189 struct wmi_vring_ba_dis_cmd cmd = {
1190 .ringid = ringid,
1191 .reason = cpu_to_le16(reason),
1192 };
1193
1194 wil_dbg_wmi(wil, "%s(ring %d reason %d)\n", __func__,
1195 ringid, reason);
1196
1197 return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, &cmd, sizeof(cmd));
1198}
1199
1200int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason)
1201{
1202 struct wmi_rcp_delba_cmd cmd = {
1203 .cidxtid = cidxtid,
1204 .reason = cpu_to_le16(reason),
1205 };
1206
1207 wil_dbg_wmi(wil, "%s(CID %d TID %d reason %d)\n", __func__,
1208 cidxtid & 0xf, (cidxtid >> 4) & 0xf, reason);
1209
1210 return wmi_send(wil, WMI_RCP_DELBA_CMDID, &cmd, sizeof(cmd));
1211}
1212
1213int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
1214 u16 status, bool amsdu, u16 agg_wsize, u16 timeout)
1215{
1216 int rc;
1217 struct wmi_rcp_addba_resp_cmd cmd = {
1218 .cidxtid = mk_cidxtid(cid, tid),
1219 .dialog_token = token,
1220 .status_code = cpu_to_le16(status),
1221 /* bit 0: A-MSDU supported
1222 * bit 1: policy (should be 0 for us)
1223 * bits 2..5: TID
1224 * bits 6..15: buffer size
1225 */
1226 .ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) |
1227 (agg_wsize << 6)),
1228 .ba_timeout = cpu_to_le16(timeout),
1229 };
1230 struct {
1231 struct wil6210_mbox_hdr_wmi wmi;
1232 struct wmi_rcp_addba_resp_sent_event evt;
1233 } __packed reply;
1234
1235 wil_dbg_wmi(wil,
1236 "ADDBA response for CID %d TID %d size %d timeout %d status %d AMSDU%s\n",
1237 cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-");
1238
1239 rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, &cmd, sizeof(cmd),
1240 WMI_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), 100);
1241 if (rc)
1242 return rc;
1243
1244 if (reply.evt.status) {
1245 wil_err(wil, "ADDBA response failed with status %d\n",
1246 le16_to_cpu(reply.evt.status));
1247 rc = -EINVAL;
1248 }
1249
1250 return rc;
1251}
1252
1114void wmi_event_flush(struct wil6210_priv *wil) 1253void wmi_event_flush(struct wil6210_priv *wil)
1115{ 1254{
1116 struct pending_wmi_event *evt, *t; 1255 struct pending_wmi_event *evt, *t;
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 27b97432d1c2..8a4af613e191 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -29,8 +29,10 @@
29 29
30/* General */ 30/* General */
31#define WILOCITY_MAX_ASSOC_STA (8) 31#define WILOCITY_MAX_ASSOC_STA (8)
32#define WILOCITY_DEFAULT_ASSOC_STA (1)
32#define WMI_MAC_LEN (6) 33#define WMI_MAC_LEN (6)
33#define WMI_PROX_RANGE_NUM (3) 34#define WMI_PROX_RANGE_NUM (3)
35#define WMI_MAX_LOSS_DMG_BEACONS (32)
34 36
35/* List of Commands */ 37/* List of Commands */
36enum wmi_command_id { 38enum wmi_command_id {
@@ -48,7 +50,7 @@ enum wmi_command_id {
48 WMI_SET_WSC_STATUS_CMDID = 0x0041, 50 WMI_SET_WSC_STATUS_CMDID = 0x0041,
49 WMI_PXMT_RANGE_CFG_CMDID = 0x0042, 51 WMI_PXMT_RANGE_CFG_CMDID = 0x0042,
50 WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043, 52 WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043,
51 WMI_FAST_MEM_ACC_MODE_CMDID = 0x0300, 53/* WMI_FAST_MEM_ACC_MODE_CMDID = 0x0300, */
52 WMI_MEM_READ_CMDID = 0x0800, 54 WMI_MEM_READ_CMDID = 0x0800,
53 WMI_MEM_WR_CMDID = 0x0801, 55 WMI_MEM_WR_CMDID = 0x0801,
54 WMI_ECHO_CMDID = 0x0803, 56 WMI_ECHO_CMDID = 0x0803,
@@ -102,6 +104,8 @@ enum wmi_command_id {
102 WMI_MAINTAIN_RESUME_CMDID = 0x0851, 104 WMI_MAINTAIN_RESUME_CMDID = 0x0851,
103 WMI_RS_MGMT_CMDID = 0x0852, 105 WMI_RS_MGMT_CMDID = 0x0852,
104 WMI_RF_MGMT_CMDID = 0x0853, 106 WMI_RF_MGMT_CMDID = 0x0853,
107 WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x0854,
108 WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x0855,
105 /* Performance monitoring commands */ 109 /* Performance monitoring commands */
106 WMI_BF_CTRL_CMDID = 0x0862, 110 WMI_BF_CTRL_CMDID = 0x0862,
107 WMI_NOTIFY_REQ_CMDID = 0x0863, 111 WMI_NOTIFY_REQ_CMDID = 0x0863,
@@ -136,6 +140,7 @@ enum wmi_command_id {
136 WMI_EAPOL_TX_CMDID = 0xf04c, 140 WMI_EAPOL_TX_CMDID = 0xf04c,
137 WMI_MAC_ADDR_REQ_CMDID = 0xf04d, 141 WMI_MAC_ADDR_REQ_CMDID = 0xf04d,
138 WMI_FW_VER_CMDID = 0xf04e, 142 WMI_FW_VER_CMDID = 0xf04e,
143 WMI_PMC_CMDID = 0xf04f,
139}; 144};
140 145
141/* 146/*
@@ -283,8 +288,8 @@ enum wmi_scan_type {
283 WMI_LONG_SCAN = 0, 288 WMI_LONG_SCAN = 0,
284 WMI_SHORT_SCAN = 1, 289 WMI_SHORT_SCAN = 1,
285 WMI_PBC_SCAN = 2, 290 WMI_PBC_SCAN = 2,
286 WMI_ACTIVE_SCAN = 3, 291 WMI_DIRECT_SCAN = 3,
287 WMI_DIRECT_SCAN = 4, 292 WMI_ACTIVE_SCAN = 4,
288}; 293};
289 294
290struct wmi_start_scan_cmd { 295struct wmi_start_scan_cmd {
@@ -375,6 +380,17 @@ struct wmi_rf_mgmt_cmd {
375} __packed; 380} __packed;
376 381
377/* 382/*
383 * WMI_THERMAL_THROTTLING_CTRL_CMDID
384 */
385#define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH (0xFFFFFFFF)
386
387struct wmi_thermal_throttling_ctrl_cmd {
388 __le32 time_on_usec;
389 __le32 time_off_usec;
390 __le32 max_txop_length_usec;
391} __packed;
392
393/*
378 * WMI_RF_RX_TEST_CMDID 394 * WMI_RF_RX_TEST_CMDID
379 */ 395 */
380struct wmi_rf_rx_test_cmd { 396struct wmi_rf_rx_test_cmd {
@@ -586,6 +602,7 @@ struct wmi_vring_ba_en_cmd {
586 u8 ringid; 602 u8 ringid;
587 u8 agg_max_wsize; 603 u8 agg_max_wsize;
588 __le16 ba_timeout; 604 __le16 ba_timeout;
605 u8 amsdu;
589} __packed; 606} __packed;
590 607
591/* 608/*
@@ -647,6 +664,7 @@ enum wmi_cfg_rx_chain_cmd_action {
647enum wmi_cfg_rx_chain_cmd_decap_trans_type { 664enum wmi_cfg_rx_chain_cmd_decap_trans_type {
648 WMI_DECAP_TYPE_802_3 = 0, 665 WMI_DECAP_TYPE_802_3 = 0,
649 WMI_DECAP_TYPE_NATIVE_WIFI = 1, 666 WMI_DECAP_TYPE_NATIVE_WIFI = 1,
667 WMI_DECAP_TYPE_NONE = 2,
650}; 668};
651 669
652enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type { 670enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
@@ -784,9 +802,17 @@ struct wmi_echo_cmd {
784 * 802 *
785 * Measure MAC and radio temperatures 803 * Measure MAC and radio temperatures
786 */ 804 */
805
806/* Possible modes for temperature measurement */
807enum wmi_temperature_measure_mode {
808 TEMPERATURE_USE_OLD_VALUE = 0x1,
809 TEMPERATURE_MEASURE_NOW = 0x2,
810};
811
787struct wmi_temp_sense_cmd { 812struct wmi_temp_sense_cmd {
788 __le32 measure_marlon_m_en; 813 __le32 measure_baseband_en;
789 __le32 measure_marlon_r_en; 814 __le32 measure_rf_en;
815 __le32 measure_mode;
790} __packed; 816} __packed;
791 817
792/* 818/*
@@ -842,6 +868,7 @@ enum wmi_event_id {
842 WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839, 868 WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
843 WMI_RS_MGMT_DONE_EVENTID = 0x1852, 869 WMI_RS_MGMT_DONE_EVENTID = 0x1852,
844 WMI_RF_MGMT_STATUS_EVENTID = 0x1853, 870 WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
871 WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855,
845 WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838, 872 WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
846 WMI_RX_MGMT_PACKET_EVENTID = 0x1840, 873 WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
847 WMI_TX_MGMT_PACKET_EVENTID = 0x1841, 874 WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
@@ -858,6 +885,7 @@ enum wmi_event_id {
858 WMI_FLASH_READ_DONE_EVENTID = 0x1902, 885 WMI_FLASH_READ_DONE_EVENTID = 0x1902,
859 WMI_FLASH_WRITE_DONE_EVENTID = 0x1903, 886 WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
860 /*P2P*/ 887 /*P2P*/
888 WMI_P2P_CFG_DONE_EVENTID = 0x1910,
861 WMI_PORT_ALLOCATED_EVENTID = 0x1911, 889 WMI_PORT_ALLOCATED_EVENTID = 0x1911,
862 WMI_PORT_DELETED_EVENTID = 0x1912, 890 WMI_PORT_DELETED_EVENTID = 0x1912,
863 WMI_LISTEN_STARTED_EVENTID = 0x1914, 891 WMI_LISTEN_STARTED_EVENTID = 0x1914,
@@ -898,6 +926,15 @@ struct wmi_rf_mgmt_status_event {
898} __packed; 926} __packed;
899 927
900/* 928/*
929 * WMI_THERMAL_THROTTLING_STATUS_EVENTID
930 */
931struct wmi_thermal_throttling_status_event {
932 __le32 time_on_usec;
933 __le32 time_off_usec;
934 __le32 max_txop_length_usec;
935} __packed;
936
937/*
901 * WMI_GET_STATUS_DONE_EVENTID 938 * WMI_GET_STATUS_DONE_EVENTID
902 */ 939 */
903struct wmi_get_status_done_event { 940struct wmi_get_status_done_event {
@@ -1052,14 +1089,23 @@ struct wmi_scan_complete_event {
1052enum wmi_vring_ba_status { 1089enum wmi_vring_ba_status {
1053 WMI_BA_AGREED = 0, 1090 WMI_BA_AGREED = 0,
1054 WMI_BA_NON_AGREED = 1, 1091 WMI_BA_NON_AGREED = 1,
1092 /* BA_EN in middle of teardown flow */
1093 WMI_BA_TD_WIP = 2,
1094 /* BA_DIS or BA_EN in middle of BA SETUP flow */
1095 WMI_BA_SETUP_WIP = 3,
1096 /* BA_EN when the BA session is already active */
1097 WMI_BA_SESSION_ACTIVE = 4,
1098 /* BA_DIS when the BA session is not active */
1099 WMI_BA_SESSION_NOT_ACTIVE = 5,
1055}; 1100};
1056 1101
1057struct wmi_vring_ba_status_event { 1102struct wmi_vring_ba_status_event {
1058 __le16 status; 1103 __le16 status; /* enum wmi_vring_ba_status */
1059 u8 reserved[2]; 1104 u8 reserved[2];
1060 u8 ringid; 1105 u8 ringid;
1061 u8 agg_wsize; 1106 u8 agg_wsize;
1062 __le16 ba_timeout; 1107 __le16 ba_timeout;
1108 u8 amsdu;
1063} __packed; 1109} __packed;
1064 1110
1065/* 1111/*
@@ -1145,6 +1191,14 @@ struct wmi_get_pcp_channel_event {
1145} __packed; 1191} __packed;
1146 1192
1147/* 1193/*
1194 * WMI_P2P_CFG_DONE_EVENTID
1195 */
1196struct wmi_p2p_cfg_done_event {
1197 u8 status; /* wmi_fw_status */
1198 u8 reserved[3];
1199} __packed;
1200
1201/*
1148* WMI_PORT_ALLOCATED_EVENTID 1202* WMI_PORT_ALLOCATED_EVENTID
1149*/ 1203*/
1150struct wmi_port_allocated_event { 1204struct wmi_port_allocated_event {
@@ -1272,8 +1326,8 @@ struct wmi_echo_event {
1272 * Measure MAC and radio temperatures 1326 * Measure MAC and radio temperatures
1273 */ 1327 */
1274struct wmi_temp_sense_done_event { 1328struct wmi_temp_sense_done_event {
1275 __le32 marlon_m_t1000; 1329 __le32 baseband_t1000;
1276 __le32 marlon_r_t1000; 1330 __le32 rf_t1000;
1277} __packed; 1331} __packed;
1278 1332
1279#endif /* __WILOCITY_WMI_H__ */ 1333#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 9183f1cf89a7..55db9f03eb2a 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -45,7 +45,6 @@
45#include <linux/ptrace.h> 45#include <linux/ptrace.h>
46#include <linux/slab.h> 46#include <linux/slab.h>
47#include <linux/string.h> 47#include <linux/string.h>
48#include <linux/ctype.h>
49#include <linux/timer.h> 48#include <linux/timer.h>
50#include <asm/byteorder.h> 49#include <asm/byteorder.h>
51#include <asm/io.h> 50#include <asm/io.h>
@@ -2699,16 +2698,7 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2699 domain[REGDOMAINSZ] = 0; 2698 domain[REGDOMAINSZ] = 0;
2700 rc = -EINVAL; 2699 rc = -EINVAL;
2701 for (i = 0; i < ARRAY_SIZE(channel_table); i++) { 2700 for (i = 0; i < ARRAY_SIZE(channel_table); i++) {
2702 /* strcasecmp doesn't exist in the library */ 2701 if (!strcasecmp(channel_table[i].name, domain)) {
2703 char *a = channel_table[i].name;
2704 char *b = domain;
2705 while (*a) {
2706 char c1 = *a++;
2707 char c2 = *b++;
2708 if (tolower(c1) != tolower(c2))
2709 break;
2710 }
2711 if (!*a && !*b) {
2712 priv->config_reg_domain = channel_table[i].reg_domain; 2702 priv->config_reg_domain = channel_table[i].reg_domain;
2713 rc = 0; 2703 rc = 0;
2714 } 2704 }
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 64a5b672e30a..759fb8d41fc9 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -166,6 +166,15 @@ config B43_PHY_LCN
166 166
167 Say N, this is BROKEN and crashes driver. 167 Say N, this is BROKEN and crashes driver.
168 168
169config B43_PHY_AC
170 bool "Support for AC-PHY (802.11ac) devices (BROKEN)"
171 depends on B43 && B43_BCMA && BROKEN
172 ---help---
173 This PHY type can be found in the following chipsets:
174 PCI: BCM4352, BCM4360
175
176 Say N, this is BROKEN and crashes driver.
177
169# This config option automatically enables b43 LEDS support, 178# This config option automatically enables b43 LEDS support,
170# if it's possible. 179# if it's possible.
171config B43_LEDS 180config B43_LEDS
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 9f7965aae93d..c624d4d90e4f 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -13,6 +13,7 @@ b43-$(CONFIG_B43_PHY_HT) += phy_ht.o
13b43-$(CONFIG_B43_PHY_HT) += tables_phy_ht.o 13b43-$(CONFIG_B43_PHY_HT) += tables_phy_ht.o
14b43-$(CONFIG_B43_PHY_HT) += radio_2059.o 14b43-$(CONFIG_B43_PHY_HT) += radio_2059.o
15b43-$(CONFIG_B43_PHY_LCN) += phy_lcn.o tables_phy_lcn.o 15b43-$(CONFIG_B43_PHY_LCN) += phy_lcn.o tables_phy_lcn.o
16b43-$(CONFIG_B43_PHY_AC) += phy_ac.o
16b43-y += sysfs.o 17b43-y += sysfs.o
17b43-y += xmit.o 18b43-y += xmit.o
18b43-y += dma.o 19b43-y += dma.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index bb12586cd7cd..036552439816 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -500,6 +500,8 @@ enum {
500#define B43_BCMA_IOCTL_PHY_BW_10MHZ 0x00000000 /* 10 MHz bandwidth, 40 MHz PHY */ 500#define B43_BCMA_IOCTL_PHY_BW_10MHZ 0x00000000 /* 10 MHz bandwidth, 40 MHz PHY */
501#define B43_BCMA_IOCTL_PHY_BW_20MHZ 0x00000040 /* 20 MHz bandwidth, 80 MHz PHY */ 501#define B43_BCMA_IOCTL_PHY_BW_20MHZ 0x00000040 /* 20 MHz bandwidth, 80 MHz PHY */
502#define B43_BCMA_IOCTL_PHY_BW_40MHZ 0x00000080 /* 40 MHz bandwidth, 160 MHz PHY */ 502#define B43_BCMA_IOCTL_PHY_BW_40MHZ 0x00000080 /* 40 MHz bandwidth, 160 MHz PHY */
503#define B43_BCMA_IOCTL_PHY_BW_80MHZ 0x000000C0 /* 80 MHz bandwidth */
504#define B43_BCMA_IOCTL_DAC 0x00000300 /* Highspeed DAC mode control field */
503#define B43_BCMA_IOCTL_GMODE 0x00002000 /* G Mode Enable */ 505#define B43_BCMA_IOCTL_GMODE 0x00002000 /* G Mode Enable */
504 506
505/* BCMA 802.11 core specific IO status (BCMA_IOST) flags */ 507/* BCMA 802.11 core specific IO status (BCMA_IOST) flags */
@@ -941,6 +943,7 @@ struct b43_wl {
941 bool beacon1_uploaded; 943 bool beacon1_uploaded;
942 bool beacon_templates_virgin; /* Never wrote the templates? */ 944 bool beacon_templates_virgin; /* Never wrote the templates? */
943 struct work_struct beacon_update_trigger; 945 struct work_struct beacon_update_trigger;
946 spinlock_t beacon_lock;
944 947
945 /* The current QOS parameters for the 4 queues. */ 948 /* The current QOS parameters for the 4 queues. */
946 struct b43_qos_params qos_params[B43_QOS_QUEUE_NUM]; 949 struct b43_qos_params qos_params[B43_QOS_QUEUE_NUM];
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 47731cb0d815..2c9088633ec6 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1262,6 +1262,23 @@ static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
1262 flags |= B43_BCMA_IOCTL_GMODE; 1262 flags |= B43_BCMA_IOCTL_GMODE;
1263 b43_device_enable(dev, flags); 1263 b43_device_enable(dev, flags);
1264 1264
1265 if (dev->phy.type == B43_PHYTYPE_AC) {
1266 u16 tmp;
1267
1268 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
1269 tmp &= ~B43_BCMA_IOCTL_DAC;
1270 tmp |= 0x100;
1271 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
1272
1273 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
1274 tmp &= ~B43_BCMA_IOCTL_PHY_CLKEN;
1275 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
1276
1277 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
1278 tmp |= B43_BCMA_IOCTL_PHY_CLKEN;
1279 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
1280 }
1281
1265 bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST); 1282 bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
1266 b43_bcma_phy_reset(dev); 1283 b43_bcma_phy_reset(dev);
1267 bcma_core_pll_ctl(dev->dev->bdev, req, status, true); 1284 bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
@@ -1601,12 +1618,26 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
1601 unsigned int rate; 1618 unsigned int rate;
1602 u16 ctl; 1619 u16 ctl;
1603 int antenna; 1620 int antenna;
1604 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon); 1621 struct ieee80211_tx_info *info;
1622 unsigned long flags;
1623 struct sk_buff *beacon_skb;
1605 1624
1606 bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data); 1625 spin_lock_irqsave(&dev->wl->beacon_lock, flags);
1607 len = min_t(size_t, dev->wl->current_beacon->len, 1626 info = IEEE80211_SKB_CB(dev->wl->current_beacon);
1608 0x200 - sizeof(struct b43_plcp_hdr6));
1609 rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value; 1627 rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value;
1628 /* Clone the beacon, so it cannot go away, while we write it to hw. */
1629 beacon_skb = skb_clone(dev->wl->current_beacon, GFP_ATOMIC);
1630 spin_unlock_irqrestore(&dev->wl->beacon_lock, flags);
1631
1632 if (!beacon_skb) {
1633 b43dbg(dev->wl, "Could not upload beacon. "
1634 "Failed to clone beacon skb.");
1635 return;
1636 }
1637
1638 bcn = (const struct ieee80211_mgmt *)(beacon_skb->data);
1639 len = min_t(size_t, beacon_skb->len,
1640 0x200 - sizeof(struct b43_plcp_hdr6));
1610 1641
1611 b43_write_template_common(dev, (const u8 *)bcn, 1642 b43_write_template_common(dev, (const u8 *)bcn,
1612 len, ram_offset, shm_size_offset, rate); 1643 len, ram_offset, shm_size_offset, rate);
@@ -1674,6 +1705,8 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
1674 B43_SHM_SH_DTIMPER, 0); 1705 B43_SHM_SH_DTIMPER, 0);
1675 } 1706 }
1676 b43dbg(dev->wl, "Updated beacon template at 0x%x\n", ram_offset); 1707 b43dbg(dev->wl, "Updated beacon template at 0x%x\n", ram_offset);
1708
1709 dev_kfree_skb_any(beacon_skb);
1677} 1710}
1678 1711
1679static void b43_upload_beacon0(struct b43_wldev *dev) 1712static void b43_upload_beacon0(struct b43_wldev *dev)
@@ -1790,13 +1823,13 @@ static void b43_beacon_update_trigger_work(struct work_struct *work)
1790 mutex_unlock(&wl->mutex); 1823 mutex_unlock(&wl->mutex);
1791} 1824}
1792 1825
1793/* Asynchronously update the packet templates in template RAM. 1826/* Asynchronously update the packet templates in template RAM. */
1794 * Locking: Requires wl->mutex to be locked. */
1795static void b43_update_templates(struct b43_wl *wl) 1827static void b43_update_templates(struct b43_wl *wl)
1796{ 1828{
1797 struct sk_buff *beacon; 1829 struct sk_buff *beacon, *old_beacon;
1830 unsigned long flags;
1798 1831
1799 /* This is the top half of the ansynchronous beacon update. 1832 /* This is the top half of the asynchronous beacon update.
1800 * The bottom half is the beacon IRQ. 1833 * The bottom half is the beacon IRQ.
1801 * Beacon update must be asynchronous to avoid sending an 1834 * Beacon update must be asynchronous to avoid sending an
1802 * invalid beacon. This can happen for example, if the firmware 1835 * invalid beacon. This can happen for example, if the firmware
@@ -1810,12 +1843,17 @@ static void b43_update_templates(struct b43_wl *wl)
1810 if (unlikely(!beacon)) 1843 if (unlikely(!beacon))
1811 return; 1844 return;
1812 1845
1813 if (wl->current_beacon) 1846 spin_lock_irqsave(&wl->beacon_lock, flags);
1814 dev_kfree_skb_any(wl->current_beacon); 1847 old_beacon = wl->current_beacon;
1815 wl->current_beacon = beacon; 1848 wl->current_beacon = beacon;
1816 wl->beacon0_uploaded = false; 1849 wl->beacon0_uploaded = false;
1817 wl->beacon1_uploaded = false; 1850 wl->beacon1_uploaded = false;
1851 spin_unlock_irqrestore(&wl->beacon_lock, flags);
1852
1818 ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger); 1853 ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
1854
1855 if (old_beacon)
1856 dev_kfree_skb_any(old_beacon);
1819} 1857}
1820 1858
1821static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int) 1859static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int)
@@ -4318,6 +4356,7 @@ redo:
4318 mutex_unlock(&wl->mutex); 4356 mutex_unlock(&wl->mutex);
4319 cancel_delayed_work_sync(&dev->periodic_work); 4357 cancel_delayed_work_sync(&dev->periodic_work);
4320 cancel_work_sync(&wl->tx_work); 4358 cancel_work_sync(&wl->tx_work);
4359 b43_leds_stop(dev);
4321 mutex_lock(&wl->mutex); 4360 mutex_lock(&wl->mutex);
4322 dev = wl->current_dev; 4361 dev = wl->current_dev;
4323 if (!dev || b43_status(dev) < B43_STAT_STARTED) { 4362 if (!dev || b43_status(dev) < B43_STAT_STARTED) {
@@ -4505,6 +4544,12 @@ static int b43_phy_versioning(struct b43_wldev *dev)
4505 unsupported = 1; 4544 unsupported = 1;
4506 break; 4545 break;
4507#endif 4546#endif
4547#ifdef CONFIG_B43_PHY_AC
4548 case B43_PHYTYPE_AC:
4549 if (phy_rev > 1)
4550 unsupported = 1;
4551 break;
4552#endif
4508 default: 4553 default:
4509 unsupported = 1; 4554 unsupported = 1;
4510 } 4555 }
@@ -4601,6 +4646,10 @@ static int b43_phy_versioning(struct b43_wldev *dev)
4601 if (radio_id != 0x2064) 4646 if (radio_id != 0x2064)
4602 unsupported = 1; 4647 unsupported = 1;
4603 break; 4648 break;
4649 case B43_PHYTYPE_AC:
4650 if (radio_id != 0x2069)
4651 unsupported = 1;
4652 break;
4604 default: 4653 default:
4605 B43_WARN_ON(1); 4654 B43_WARN_ON(1);
4606 } 4655 }
@@ -5094,7 +5143,6 @@ static int b43_op_beacon_set_tim(struct ieee80211_hw *hw,
5094{ 5143{
5095 struct b43_wl *wl = hw_to_b43_wl(hw); 5144 struct b43_wl *wl = hw_to_b43_wl(hw);
5096 5145
5097 /* FIXME: add locking */
5098 b43_update_templates(wl); 5146 b43_update_templates(wl);
5099 5147
5100 return 0; 5148 return 0;
@@ -5584,6 +5632,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
5584 wl->hw = hw; 5632 wl->hw = hw;
5585 mutex_init(&wl->mutex); 5633 mutex_init(&wl->mutex);
5586 spin_lock_init(&wl->hardirq_lock); 5634 spin_lock_init(&wl->hardirq_lock);
5635 spin_lock_init(&wl->beacon_lock);
5587 INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work); 5636 INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
5588 INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work); 5637 INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
5589 INIT_WORK(&wl->tx_work, b43_tx_work); 5638 INIT_WORK(&wl->tx_work, b43_tx_work);
diff --git a/drivers/net/wireless/b43/phy_ac.c b/drivers/net/wireless/b43/phy_ac.c
new file mode 100644
index 000000000000..e75633d67938
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_ac.c
@@ -0,0 +1,92 @@
1/*
2 * Broadcom B43 wireless driver
3 * IEEE 802.11ac AC-PHY support
4 *
5 * Copyright (c) 2015 Rafał Miłecki <zajec5@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include "b43.h"
14#include "phy_ac.h"
15
16/**************************************************
17 * Basic PHY ops
18 **************************************************/
19
20static int b43_phy_ac_op_allocate(struct b43_wldev *dev)
21{
22 struct b43_phy_ac *phy_ac;
23
24 phy_ac = kzalloc(sizeof(*phy_ac), GFP_KERNEL);
25 if (!phy_ac)
26 return -ENOMEM;
27 dev->phy.ac = phy_ac;
28
29 return 0;
30}
31
32static void b43_phy_ac_op_free(struct b43_wldev *dev)
33{
34 struct b43_phy *phy = &dev->phy;
35 struct b43_phy_ac *phy_ac = phy->ac;
36
37 kfree(phy_ac);
38 phy->ac = NULL;
39}
40
41static void b43_phy_ac_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask,
42 u16 set)
43{
44 b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg);
45 b43_write16(dev, B43_MMIO_PHY_DATA,
46 (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set);
47}
48
49static u16 b43_phy_ac_op_radio_read(struct b43_wldev *dev, u16 reg)
50{
51 b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, reg);
52 return b43_read16(dev, B43_MMIO_RADIO24_DATA);
53}
54
55static void b43_phy_ac_op_radio_write(struct b43_wldev *dev, u16 reg,
56 u16 value)
57{
58 b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, reg);
59 b43_write16(dev, B43_MMIO_RADIO24_DATA, value);
60}
61
62static unsigned int b43_phy_ac_op_get_default_chan(struct b43_wldev *dev)
63{
64 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
65 return 11;
66 return 36;
67}
68
69static enum b43_txpwr_result
70b43_phy_ac_op_recalc_txpower(struct b43_wldev *dev, bool ignore_tssi)
71{
72 return B43_TXPWR_RES_DONE;
73}
74
75static void b43_phy_ac_op_adjust_txpower(struct b43_wldev *dev)
76{
77}
78
79/**************************************************
80 * PHY ops struct
81 **************************************************/
82
83const struct b43_phy_operations b43_phyops_ac = {
84 .allocate = b43_phy_ac_op_allocate,
85 .free = b43_phy_ac_op_free,
86 .phy_maskset = b43_phy_ac_op_maskset,
87 .radio_read = b43_phy_ac_op_radio_read,
88 .radio_write = b43_phy_ac_op_radio_write,
89 .get_default_chan = b43_phy_ac_op_get_default_chan,
90 .recalc_txpower = b43_phy_ac_op_recalc_txpower,
91 .adjust_txpower = b43_phy_ac_op_adjust_txpower,
92};
diff --git a/drivers/net/wireless/b43/phy_ac.h b/drivers/net/wireless/b43/phy_ac.h
new file mode 100644
index 000000000000..d1ca79e0eb24
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_ac.h
@@ -0,0 +1,38 @@
1#ifndef B43_PHY_AC_H_
2#define B43_PHY_AC_H_
3
4#include "phy_common.h"
5
6#define B43_PHY_AC_BBCFG 0x001
7#define B43_PHY_AC_BBCFG_RSTCCA 0x4000 /* Reset CCA */
8#define B43_PHY_AC_BANDCTL 0x003 /* Band control */
9#define B43_PHY_AC_BANDCTL_5GHZ 0x0001
10#define B43_PHY_AC_TABLE_ID 0x00d
11#define B43_PHY_AC_TABLE_OFFSET 0x00e
12#define B43_PHY_AC_TABLE_DATA1 0x00f
13#define B43_PHY_AC_TABLE_DATA2 0x010
14#define B43_PHY_AC_TABLE_DATA3 0x011
15#define B43_PHY_AC_CLASSCTL 0x140 /* Classifier control */
16#define B43_PHY_AC_CLASSCTL_CCKEN 0x0001 /* CCK enable */
17#define B43_PHY_AC_CLASSCTL_OFDMEN 0x0002 /* OFDM enable */
18#define B43_PHY_AC_CLASSCTL_WAITEDEN 0x0004 /* Waited enable */
19#define B43_PHY_AC_BW1A 0x371
20#define B43_PHY_AC_BW2 0x372
21#define B43_PHY_AC_BW3 0x373
22#define B43_PHY_AC_BW4 0x374
23#define B43_PHY_AC_BW5 0x375
24#define B43_PHY_AC_BW6 0x376
25#define B43_PHY_AC_RFCTL_CMD 0x408
26#define B43_PHY_AC_C1_CLIP 0x6d4
27#define B43_PHY_AC_C1_CLIP_DIS 0x4000
28#define B43_PHY_AC_C2_CLIP 0x8d4
29#define B43_PHY_AC_C2_CLIP_DIS 0x4000
30#define B43_PHY_AC_C3_CLIP 0xad4
31#define B43_PHY_AC_C3_CLIP_DIS 0x4000
32
33struct b43_phy_ac {
34};
35
36extern const struct b43_phy_operations b43_phyops_ac;
37
38#endif /* B43_PHY_AC_H_ */
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index ee27b06074e1..ec2b9c577b90 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -33,6 +33,7 @@
33#include "phy_lp.h" 33#include "phy_lp.h"
34#include "phy_ht.h" 34#include "phy_ht.h"
35#include "phy_lcn.h" 35#include "phy_lcn.h"
36#include "phy_ac.h"
36#include "b43.h" 37#include "b43.h"
37#include "main.h" 38#include "main.h"
38 39
@@ -70,6 +71,11 @@ int b43_phy_allocate(struct b43_wldev *dev)
70 phy->ops = &b43_phyops_lcn; 71 phy->ops = &b43_phyops_lcn;
71#endif 72#endif
72 break; 73 break;
74 case B43_PHYTYPE_AC:
75#ifdef CONFIG_B43_PHY_AC
76 phy->ops = &b43_phyops_ac;
77#endif
78 break;
73 } 79 }
74 if (B43_WARN_ON(!phy->ops)) 80 if (B43_WARN_ON(!phy->ops))
75 return -ENODEV; 81 return -ENODEV;
@@ -572,7 +578,8 @@ void b43_phy_force_clock(struct b43_wldev *dev, bool force)
572 u32 tmp; 578 u32 tmp;
573 579
574 WARN_ON(dev->phy.type != B43_PHYTYPE_N && 580 WARN_ON(dev->phy.type != B43_PHYTYPE_N &&
575 dev->phy.type != B43_PHYTYPE_HT); 581 dev->phy.type != B43_PHYTYPE_HT &&
582 dev->phy.type != B43_PHYTYPE_AC);
576 583
577 switch (dev->dev->bus_type) { 584 switch (dev->dev->bus_type) {
578#ifdef CONFIG_B43_BCMA 585#ifdef CONFIG_B43_BCMA
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index 3912274f71e3..78d86526799e 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -222,6 +222,8 @@ struct b43_phy {
222 struct b43_phy_ht *ht; 222 struct b43_phy_ht *ht;
223 /* LCN-PHY specific information */ 223 /* LCN-PHY specific information */
224 struct b43_phy_lcn *lcn; 224 struct b43_phy_lcn *lcn;
225 /* AC-PHY specific information */
226 struct b43_phy_ac *ac;
225 }; 227 };
226 228
227 /* Band support flags. */ 229 /* Band support flags. */
diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c
index 896177690394..9501420340a9 100644
--- a/drivers/net/wireless/b43legacy/radio.c
+++ b/drivers/net/wireless/b43legacy/radio.c
@@ -1743,25 +1743,6 @@ u16 freq_r3A_value(u16 frequency)
1743 return value; 1743 return value;
1744} 1744}
1745 1745
1746void b43legacy_radio_set_tx_iq(struct b43legacy_wldev *dev)
1747{
1748 static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 };
1749 static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A };
1750 u16 tmp = b43legacy_radio_read16(dev, 0x001E);
1751 int i;
1752 int j;
1753
1754 for (i = 0; i < 5; i++) {
1755 for (j = 0; j < 5; j++) {
1756 if (tmp == (data_high[i] | data_low[j])) {
1757 b43legacy_phy_write(dev, 0x0069, (i - j) << 8 |
1758 0x00C0);
1759 return;
1760 }
1761 }
1762 }
1763}
1764
1765int b43legacy_radio_selectchannel(struct b43legacy_wldev *dev, 1746int b43legacy_radio_selectchannel(struct b43legacy_wldev *dev,
1766 u8 channel, 1747 u8 channel,
1767 int synthetic_pu_workaround) 1748 int synthetic_pu_workaround)
diff --git a/drivers/net/wireless/b43legacy/radio.h b/drivers/net/wireless/b43legacy/radio.h
index bccb3d7da682..dd2976d1d561 100644
--- a/drivers/net/wireless/b43legacy/radio.h
+++ b/drivers/net/wireless/b43legacy/radio.h
@@ -92,7 +92,6 @@ void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val);
92void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val); 92void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val);
93void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev); 93void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev);
94 94
95void b43legacy_radio_set_tx_iq(struct b43legacy_wldev *dev);
96u16 b43legacy_radio_calibrationvalue(struct b43legacy_wldev *dev); 95u16 b43legacy_radio_calibrationvalue(struct b43legacy_wldev *dev);
97 96
98#endif /* B43legacy_RADIO_H_ */ 97#endif /* B43legacy_RADIO_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 9880dae2a569..7944224e3fc9 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -97,25 +97,6 @@ static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
97{ 97{
98} 98}
99 99
100static bool brcmf_sdiod_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
101{
102 bool is_err = false;
103#ifdef CONFIG_PM_SLEEP
104 is_err = atomic_read(&sdiodev->suspend);
105#endif
106 return is_err;
107}
108
109static void brcmf_sdiod_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
110 wait_queue_head_t *wq)
111{
112#ifdef CONFIG_PM_SLEEP
113 int retry = 0;
114 while (atomic_read(&sdiodev->suspend) && retry++ != 30)
115 wait_event_timeout(*wq, false, HZ/100);
116#endif
117}
118
119int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev) 100int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
120{ 101{
121 int ret = 0; 102 int ret = 0;
@@ -244,10 +225,6 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
244 brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n", 225 brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
245 write, fn, addr, regsz); 226 write, fn, addr, regsz);
246 227
247 brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
248 if (brcmf_sdiod_pm_resume_error(sdiodev))
249 return -EIO;
250
251 /* only allow byte access on F0 */ 228 /* only allow byte access on F0 */
252 if (WARN_ON(regsz > 1 && !fn)) 229 if (WARN_ON(regsz > 1 && !fn))
253 return -EINVAL; 230 return -EINVAL;
@@ -292,6 +269,12 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
292 return ret; 269 return ret;
293} 270}
294 271
272static void brcmf_sdiod_nomedium_state(struct brcmf_sdio_dev *sdiodev)
273{
274 sdiodev->state = BRCMF_STATE_NOMEDIUM;
275 brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
276}
277
295static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr, 278static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
296 u8 regsz, void *data, bool write) 279 u8 regsz, void *data, bool write)
297{ 280{
@@ -299,7 +282,7 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
299 s32 retry = 0; 282 s32 retry = 0;
300 int ret; 283 int ret;
301 284
302 if (sdiodev->bus_if->state == BRCMF_BUS_NOMEDIUM) 285 if (sdiodev->state == BRCMF_STATE_NOMEDIUM)
303 return -ENOMEDIUM; 286 return -ENOMEDIUM;
304 287
305 /* 288 /*
@@ -325,7 +308,7 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
325 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT); 308 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
326 309
327 if (ret == -ENOMEDIUM) 310 if (ret == -ENOMEDIUM)
328 brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM); 311 brcmf_sdiod_nomedium_state(sdiodev);
329 else if (ret != 0) { 312 else if (ret != 0) {
330 /* 313 /*
331 * SleepCSR register access can fail when 314 * SleepCSR register access can fail when
@@ -348,7 +331,7 @@ brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
348 int err = 0, i; 331 int err = 0, i;
349 u8 addr[3]; 332 u8 addr[3];
350 333
351 if (sdiodev->bus_if->state == BRCMF_BUS_NOMEDIUM) 334 if (sdiodev->state == BRCMF_STATE_NOMEDIUM)
352 return -ENOMEDIUM; 335 return -ENOMEDIUM;
353 336
354 addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK; 337 addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
@@ -462,10 +445,6 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
462 unsigned int req_sz; 445 unsigned int req_sz;
463 int err; 446 int err;
464 447
465 brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
466 if (brcmf_sdiod_pm_resume_error(sdiodev))
467 return -EIO;
468
469 /* Single skb use the standard mmc interface */ 448 /* Single skb use the standard mmc interface */
470 req_sz = pkt->len + 3; 449 req_sz = pkt->len + 3;
471 req_sz &= (uint)~3; 450 req_sz &= (uint)~3;
@@ -481,7 +460,7 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
481 err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr, 460 err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
482 req_sz); 461 req_sz);
483 if (err == -ENOMEDIUM) 462 if (err == -ENOMEDIUM)
484 brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM); 463 brcmf_sdiod_nomedium_state(sdiodev);
485 return err; 464 return err;
486} 465}
487 466
@@ -516,10 +495,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
516 if (!pktlist->qlen) 495 if (!pktlist->qlen)
517 return -EINVAL; 496 return -EINVAL;
518 497
519 brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
520 if (brcmf_sdiod_pm_resume_error(sdiodev))
521 return -EIO;
522
523 target_list = pktlist; 498 target_list = pktlist;
524 /* for host with broken sg support, prepare a page aligned list */ 499 /* for host with broken sg support, prepare a page aligned list */
525 __skb_queue_head_init(&local_list); 500 __skb_queue_head_init(&local_list);
@@ -620,8 +595,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
620 595
621 ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error; 596 ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
622 if (ret == -ENOMEDIUM) { 597 if (ret == -ENOMEDIUM) {
623 brcmf_bus_change_state(sdiodev->bus_if, 598 brcmf_sdiod_nomedium_state(sdiodev);
624 BRCMF_BUS_NOMEDIUM);
625 break; 599 break;
626 } else if (ret != 0) { 600 } else if (ret != 0) {
627 brcmf_err("CMD53 sg block %s failed %d\n", 601 brcmf_err("CMD53 sg block %s failed %d\n",
@@ -996,18 +970,20 @@ out:
996} 970}
997 971
998#define BRCMF_SDIO_DEVICE(dev_id) \ 972#define BRCMF_SDIO_DEVICE(dev_id) \
999 {SDIO_DEVICE(BRCM_SDIO_VENDOR_ID_BROADCOM, dev_id)} 973 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, dev_id)}
1000 974
1001/* devices we support, null terminated */ 975/* devices we support, null terminated */
1002static const struct sdio_device_id brcmf_sdmmc_ids[] = { 976static const struct sdio_device_id brcmf_sdmmc_ids[] = {
1003 BRCMF_SDIO_DEVICE(BRCM_SDIO_43143_DEVICE_ID), 977 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143),
1004 BRCMF_SDIO_DEVICE(BRCM_SDIO_43241_DEVICE_ID), 978 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43241),
1005 BRCMF_SDIO_DEVICE(BRCM_SDIO_4329_DEVICE_ID), 979 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4329),
1006 BRCMF_SDIO_DEVICE(BRCM_SDIO_4330_DEVICE_ID), 980 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4330),
1007 BRCMF_SDIO_DEVICE(BRCM_SDIO_4334_DEVICE_ID), 981 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4334),
1008 BRCMF_SDIO_DEVICE(BRCM_SDIO_43362_DEVICE_ID), 982 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
1009 BRCMF_SDIO_DEVICE(BRCM_SDIO_4335_4339_DEVICE_ID), 983 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
1010 BRCMF_SDIO_DEVICE(BRCM_SDIO_4354_DEVICE_ID), 984 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
985 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
986 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
1011 { /* end: all zeroes */ } 987 { /* end: all zeroes */ }
1012}; 988};
1013MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); 989MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -1074,9 +1050,9 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
1074 bus_if->wowl_supported = true; 1050 bus_if->wowl_supported = true;
1075#endif 1051#endif
1076 1052
1053 sdiodev->sleeping = false;
1077 atomic_set(&sdiodev->suspend, false); 1054 atomic_set(&sdiodev->suspend, false);
1078 init_waitqueue_head(&sdiodev->request_word_wait); 1055 init_waitqueue_head(&sdiodev->idle_wait);
1079 init_waitqueue_head(&sdiodev->request_buffer_wait);
1080 1056
1081 brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n"); 1057 brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
1082 err = brcmf_sdiod_probe(sdiodev); 1058 err = brcmf_sdiod_probe(sdiodev);
@@ -1138,12 +1114,23 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
1138#ifdef CONFIG_PM_SLEEP 1114#ifdef CONFIG_PM_SLEEP
1139static int brcmf_ops_sdio_suspend(struct device *dev) 1115static int brcmf_ops_sdio_suspend(struct device *dev)
1140{ 1116{
1141 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 1117 struct brcmf_bus *bus_if;
1142 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 1118 struct brcmf_sdio_dev *sdiodev;
1143 mmc_pm_flag_t sdio_flags; 1119 mmc_pm_flag_t sdio_flags;
1144 1120
1145 brcmf_dbg(SDIO, "Enter\n"); 1121 brcmf_dbg(SDIO, "Enter\n");
1146 1122
1123 bus_if = dev_get_drvdata(dev);
1124 sdiodev = bus_if->bus_priv.sdio;
1125
1126 /* wait for watchdog to go idle */
1127 if (wait_event_timeout(sdiodev->idle_wait, sdiodev->sleeping,
1128 msecs_to_jiffies(3 * BRCMF_WD_POLL_MS)) == 0) {
1129 brcmf_err("bus still active\n");
1130 return -EBUSY;
1131 }
1132 /* disable watchdog */
1133 brcmf_sdio_wd_timer(sdiodev->bus, 0);
1147 atomic_set(&sdiodev->suspend, true); 1134 atomic_set(&sdiodev->suspend, true);
1148 1135
1149 if (sdiodev->wowl_enabled) { 1136 if (sdiodev->wowl_enabled) {
@@ -1155,9 +1142,6 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
1155 if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags)) 1142 if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
1156 brcmf_err("Failed to set pm_flags %x\n", sdio_flags); 1143 brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
1157 } 1144 }
1158
1159 brcmf_sdio_wd_timer(sdiodev->bus, 0);
1160
1161 return 0; 1145 return 0;
1162} 1146}
1163 1147
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
index ef344e47218a..89e6a4dc105e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
@@ -33,11 +33,8 @@
33 33
34/* The level of bus communication with the dongle */ 34/* The level of bus communication with the dongle */
35enum brcmf_bus_state { 35enum brcmf_bus_state {
36 BRCMF_BUS_UNKNOWN, /* Not determined yet */
37 BRCMF_BUS_NOMEDIUM, /* No medium access to dongle */
38 BRCMF_BUS_DOWN, /* Not ready for frame transfers */ 36 BRCMF_BUS_DOWN, /* Not ready for frame transfers */
39 BRCMF_BUS_LOAD, /* Download access only (CPU reset) */ 37 BRCMF_BUS_UP /* Ready for frame transfers */
40 BRCMF_BUS_DATA /* Ready for frame transfers */
41}; 38};
42 39
43/* The level of bus communication with the dongle */ 40/* The level of bus communication with the dongle */
@@ -188,22 +185,6 @@ void brcmf_bus_wowl_config(struct brcmf_bus *bus, bool enabled)
188 bus->ops->wowl_config(bus->dev, enabled); 185 bus->ops->wowl_config(bus->dev, enabled);
189} 186}
190 187
191static inline bool brcmf_bus_ready(struct brcmf_bus *bus)
192{
193 return bus->state == BRCMF_BUS_LOAD || bus->state == BRCMF_BUS_DATA;
194}
195
196static inline void brcmf_bus_change_state(struct brcmf_bus *bus,
197 enum brcmf_bus_state new_state)
198{
199 /* NOMEDIUM is permanent */
200 if (bus->state == BRCMF_BUS_NOMEDIUM)
201 return;
202
203 brcmf_dbg(TRACE, "%d -> %d\n", bus->state, new_state);
204 bus->state = new_state;
205}
206
207/* 188/*
208 * interface functions from common layer 189 * interface functions from common layer
209 */ 190 */
@@ -226,6 +207,9 @@ void brcmf_txflowblock(struct device *dev, bool state);
226/* Notify the bus has transferred the tx packet to firmware */ 207/* Notify the bus has transferred the tx packet to firmware */
227void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success); 208void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
228 209
210/* Configure the "global" bus state used by upper layers */
211void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state);
212
229int brcmf_bus_start(struct device *dev); 213int brcmf_bus_start(struct device *dev);
230s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len); 214s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len);
231void brcmf_bus_add_txhdrlen(struct device *dev, uint len); 215void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index 3aecc5f48719..b59b8c6c42ab 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -38,6 +38,7 @@
38#include "proto.h" 38#include "proto.h"
39#include "vendor.h" 39#include "vendor.h"
40#include "bus.h" 40#include "bus.h"
41#include "common.h"
41 42
42#define BRCMF_SCAN_IE_LEN_MAX 2048 43#define BRCMF_SCAN_IE_LEN_MAX 2048
43#define BRCMF_PNO_VERSION 2 44#define BRCMF_PNO_VERSION 2
@@ -452,16 +453,16 @@ static void convert_key_from_CPU(struct brcmf_wsec_key *key,
452} 453}
453 454
454static int 455static int
455send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key) 456send_key_to_dongle(struct brcmf_if *ifp, struct brcmf_wsec_key *key)
456{ 457{
457 int err; 458 int err;
458 struct brcmf_wsec_key_le key_le; 459 struct brcmf_wsec_key_le key_le;
459 460
460 convert_key_from_CPU(key, &key_le); 461 convert_key_from_CPU(key, &key_le);
461 462
462 brcmf_netdev_wait_pend8021x(ndev); 463 brcmf_netdev_wait_pend8021x(ifp);
463 464
464 err = brcmf_fil_bsscfg_data_set(netdev_priv(ndev), "wsec_key", &key_le, 465 err = brcmf_fil_bsscfg_data_set(ifp, "wsec_key", &key_le,
465 sizeof(key_le)); 466 sizeof(key_le));
466 467
467 if (err) 468 if (err)
@@ -1228,7 +1229,25 @@ static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
1228 memset(prof, 0, sizeof(*prof)); 1229 memset(prof, 0, sizeof(*prof));
1229} 1230}
1230 1231
1231static void brcmf_link_down(struct brcmf_cfg80211_vif *vif) 1232static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e)
1233{
1234 u16 reason;
1235
1236 switch (e->event_code) {
1237 case BRCMF_E_DEAUTH:
1238 case BRCMF_E_DEAUTH_IND:
1239 case BRCMF_E_DISASSOC_IND:
1240 reason = e->reason;
1241 break;
1242 case BRCMF_E_LINK:
1243 default:
1244 reason = 0;
1245 break;
1246 }
1247 return reason;
1248}
1249
1250static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
1232{ 1251{
1233 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy); 1252 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy);
1234 s32 err = 0; 1253 s32 err = 0;
@@ -1243,7 +1262,8 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
1243 brcmf_err("WLC_DISASSOC failed (%d)\n", err); 1262 brcmf_err("WLC_DISASSOC failed (%d)\n", err);
1244 } 1263 }
1245 clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state); 1264 clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
1246 cfg80211_disconnected(vif->wdev.netdev, 0, NULL, 0, GFP_KERNEL); 1265 cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0,
1266 GFP_KERNEL);
1247 1267
1248 } 1268 }
1249 clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state); 1269 clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
@@ -1413,7 +1433,7 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
1413 if (!check_vif_up(ifp->vif)) 1433 if (!check_vif_up(ifp->vif))
1414 return -EIO; 1434 return -EIO;
1415 1435
1416 brcmf_link_down(ifp->vif); 1436 brcmf_link_down(ifp->vif, WLAN_REASON_DEAUTH_LEAVING);
1417 1437
1418 brcmf_dbg(TRACE, "Exit\n"); 1438 brcmf_dbg(TRACE, "Exit\n");
1419 1439
@@ -1670,7 +1690,7 @@ brcmf_set_sharedkey(struct net_device *ndev,
1670 brcmf_dbg(CONN, "key length (%d) key index (%d) algo (%d)\n", 1690 brcmf_dbg(CONN, "key length (%d) key index (%d) algo (%d)\n",
1671 key.len, key.index, key.algo); 1691 key.len, key.index, key.algo);
1672 brcmf_dbg(CONN, "key \"%s\"\n", key.data); 1692 brcmf_dbg(CONN, "key \"%s\"\n", key.data);
1673 err = send_key_to_dongle(ndev, &key); 1693 err = send_key_to_dongle(netdev_priv(ndev), &key);
1674 if (err) 1694 if (err)
1675 return err; 1695 return err;
1676 1696
@@ -2052,7 +2072,7 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
2052 /* check for key index change */ 2072 /* check for key index change */
2053 if (key.len == 0) { 2073 if (key.len == 0) {
2054 /* key delete */ 2074 /* key delete */
2055 err = send_key_to_dongle(ndev, &key); 2075 err = send_key_to_dongle(ifp, &key);
2056 if (err) 2076 if (err)
2057 brcmf_err("key delete error (%d)\n", err); 2077 brcmf_err("key delete error (%d)\n", err);
2058 } else { 2078 } else {
@@ -2108,7 +2128,7 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
2108 brcmf_err("Invalid cipher (0x%x)\n", params->cipher); 2128 brcmf_err("Invalid cipher (0x%x)\n", params->cipher);
2109 return -EINVAL; 2129 return -EINVAL;
2110 } 2130 }
2111 err = send_key_to_dongle(ndev, &key); 2131 err = send_key_to_dongle(ifp, &key);
2112 if (err) 2132 if (err)
2113 brcmf_err("wsec_key error (%d)\n", err); 2133 brcmf_err("wsec_key error (%d)\n", err);
2114 } 2134 }
@@ -2121,7 +2141,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
2121 struct key_params *params) 2141 struct key_params *params)
2122{ 2142{
2123 struct brcmf_if *ifp = netdev_priv(ndev); 2143 struct brcmf_if *ifp = netdev_priv(ndev);
2124 struct brcmf_wsec_key key; 2144 struct brcmf_wsec_key *key;
2125 s32 val; 2145 s32 val;
2126 s32 wsec; 2146 s32 wsec;
2127 s32 err = 0; 2147 s32 err = 0;
@@ -2132,54 +2152,62 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
2132 if (!check_vif_up(ifp->vif)) 2152 if (!check_vif_up(ifp->vif))
2133 return -EIO; 2153 return -EIO;
2134 2154
2155 if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) {
2156 /* we ignore this key index in this case */
2157 brcmf_err("invalid key index (%d)\n", key_idx);
2158 return -EINVAL;
2159 }
2160
2135 if (mac_addr && 2161 if (mac_addr &&
2136 (params->cipher != WLAN_CIPHER_SUITE_WEP40) && 2162 (params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
2137 (params->cipher != WLAN_CIPHER_SUITE_WEP104)) { 2163 (params->cipher != WLAN_CIPHER_SUITE_WEP104)) {
2138 brcmf_dbg(TRACE, "Exit"); 2164 brcmf_dbg(TRACE, "Exit");
2139 return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params); 2165 return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
2140 } 2166 }
2141 memset(&key, 0, sizeof(key));
2142 2167
2143 key.len = (u32) params->key_len; 2168 key = &ifp->vif->profile.key[key_idx];
2144 key.index = (u32) key_idx; 2169 memset(key, 0, sizeof(*key));
2145 2170
2146 if (key.len > sizeof(key.data)) { 2171 if (params->key_len > sizeof(key->data)) {
2147 brcmf_err("Too long key length (%u)\n", key.len); 2172 brcmf_err("Too long key length (%u)\n", params->key_len);
2148 err = -EINVAL; 2173 err = -EINVAL;
2149 goto done; 2174 goto done;
2150 } 2175 }
2151 memcpy(key.data, params->key, key.len); 2176 key->len = params->key_len;
2177 key->index = key_idx;
2152 2178
2153 key.flags = BRCMF_PRIMARY_KEY; 2179 memcpy(key->data, params->key, key->len);
2180
2181 key->flags = BRCMF_PRIMARY_KEY;
2154 switch (params->cipher) { 2182 switch (params->cipher) {
2155 case WLAN_CIPHER_SUITE_WEP40: 2183 case WLAN_CIPHER_SUITE_WEP40:
2156 key.algo = CRYPTO_ALGO_WEP1; 2184 key->algo = CRYPTO_ALGO_WEP1;
2157 val = WEP_ENABLED; 2185 val = WEP_ENABLED;
2158 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n"); 2186 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n");
2159 break; 2187 break;
2160 case WLAN_CIPHER_SUITE_WEP104: 2188 case WLAN_CIPHER_SUITE_WEP104:
2161 key.algo = CRYPTO_ALGO_WEP128; 2189 key->algo = CRYPTO_ALGO_WEP128;
2162 val = WEP_ENABLED; 2190 val = WEP_ENABLED;
2163 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n"); 2191 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
2164 break; 2192 break;
2165 case WLAN_CIPHER_SUITE_TKIP: 2193 case WLAN_CIPHER_SUITE_TKIP:
2166 if (!brcmf_is_apmode(ifp->vif)) { 2194 if (!brcmf_is_apmode(ifp->vif)) {
2167 brcmf_dbg(CONN, "Swapping RX/TX MIC key\n"); 2195 brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
2168 memcpy(keybuf, &key.data[24], sizeof(keybuf)); 2196 memcpy(keybuf, &key->data[24], sizeof(keybuf));
2169 memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); 2197 memcpy(&key->data[24], &key->data[16], sizeof(keybuf));
2170 memcpy(&key.data[16], keybuf, sizeof(keybuf)); 2198 memcpy(&key->data[16], keybuf, sizeof(keybuf));
2171 } 2199 }
2172 key.algo = CRYPTO_ALGO_TKIP; 2200 key->algo = CRYPTO_ALGO_TKIP;
2173 val = TKIP_ENABLED; 2201 val = TKIP_ENABLED;
2174 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n"); 2202 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
2175 break; 2203 break;
2176 case WLAN_CIPHER_SUITE_AES_CMAC: 2204 case WLAN_CIPHER_SUITE_AES_CMAC:
2177 key.algo = CRYPTO_ALGO_AES_CCM; 2205 key->algo = CRYPTO_ALGO_AES_CCM;
2178 val = AES_ENABLED; 2206 val = AES_ENABLED;
2179 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); 2207 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
2180 break; 2208 break;
2181 case WLAN_CIPHER_SUITE_CCMP: 2209 case WLAN_CIPHER_SUITE_CCMP:
2182 key.algo = CRYPTO_ALGO_AES_CCM; 2210 key->algo = CRYPTO_ALGO_AES_CCM;
2183 val = AES_ENABLED; 2211 val = AES_ENABLED;
2184 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n"); 2212 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n");
2185 break; 2213 break;
@@ -2189,7 +2217,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
2189 goto done; 2217 goto done;
2190 } 2218 }
2191 2219
2192 err = send_key_to_dongle(ndev, &key); 2220 err = send_key_to_dongle(ifp, key);
2193 if (err) 2221 if (err)
2194 goto done; 2222 goto done;
2195 2223
@@ -2222,7 +2250,7 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
2222 if (!check_vif_up(ifp->vif)) 2250 if (!check_vif_up(ifp->vif))
2223 return -EIO; 2251 return -EIO;
2224 2252
2225 if (key_idx >= DOT11_MAX_DEFAULT_KEYS) { 2253 if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) {
2226 /* we ignore this key index in this case */ 2254 /* we ignore this key index in this case */
2227 brcmf_err("invalid key index (%d)\n", key_idx); 2255 brcmf_err("invalid key index (%d)\n", key_idx);
2228 return -EINVAL; 2256 return -EINVAL;
@@ -2237,7 +2265,7 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
2237 brcmf_dbg(CONN, "key index (%d)\n", key_idx); 2265 brcmf_dbg(CONN, "key index (%d)\n", key_idx);
2238 2266
2239 /* Set the new key/index */ 2267 /* Set the new key/index */
2240 err = send_key_to_dongle(ndev, &key); 2268 err = send_key_to_dongle(ifp, &key);
2241 2269
2242 brcmf_dbg(TRACE, "Exit\n"); 2270 brcmf_dbg(TRACE, "Exit\n");
2243 return err; 2271 return err;
@@ -2305,6 +2333,39 @@ brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
2305 return -EOPNOTSUPP; 2333 return -EOPNOTSUPP;
2306} 2334}
2307 2335
2336static void
2337brcmf_cfg80211_reconfigure_wep(struct brcmf_if *ifp)
2338{
2339 s32 err;
2340 u8 key_idx;
2341 struct brcmf_wsec_key *key;
2342 s32 wsec;
2343
2344 for (key_idx = 0; key_idx < BRCMF_MAX_DEFAULT_KEYS; key_idx++) {
2345 key = &ifp->vif->profile.key[key_idx];
2346 if ((key->algo == CRYPTO_ALGO_WEP1) ||
2347 (key->algo == CRYPTO_ALGO_WEP128))
2348 break;
2349 }
2350 if (key_idx == BRCMF_MAX_DEFAULT_KEYS)
2351 return;
2352
2353 err = send_key_to_dongle(ifp, key);
2354 if (err) {
2355 brcmf_err("Setting WEP key failed (%d)\n", err);
2356 return;
2357 }
2358 err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
2359 if (err) {
2360 brcmf_err("get wsec error (%d)\n", err);
2361 return;
2362 }
2363 wsec |= WEP_ENABLED;
2364 err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
2365 if (err)
2366 brcmf_err("set wsec error (%d)\n", err);
2367}
2368
2308static s32 2369static s32
2309brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, 2370brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
2310 const u8 *mac, struct station_info *sinfo) 2371 const u8 *mac, struct station_info *sinfo)
@@ -2333,10 +2394,10 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
2333 brcmf_err("GET STA INFO failed, %d\n", err); 2394 brcmf_err("GET STA INFO failed, %d\n", err);
2334 goto done; 2395 goto done;
2335 } 2396 }
2336 sinfo->filled = STATION_INFO_INACTIVE_TIME; 2397 sinfo->filled = BIT(NL80211_STA_INFO_INACTIVE_TIME);
2337 sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000; 2398 sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000;
2338 if (le32_to_cpu(sta_info_le.flags) & BRCMF_STA_ASSOC) { 2399 if (le32_to_cpu(sta_info_le.flags) & BRCMF_STA_ASSOC) {
2339 sinfo->filled |= STATION_INFO_CONNECTED_TIME; 2400 sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
2340 sinfo->connected_time = le32_to_cpu(sta_info_le.in); 2401 sinfo->connected_time = le32_to_cpu(sta_info_le.in);
2341 } 2402 }
2342 brcmf_dbg(TRACE, "STA idle time : %d ms, connected time :%d sec\n", 2403 brcmf_dbg(TRACE, "STA idle time : %d ms, connected time :%d sec\n",
@@ -2354,7 +2415,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
2354 brcmf_err("Could not get rate (%d)\n", err); 2415 brcmf_err("Could not get rate (%d)\n", err);
2355 goto done; 2416 goto done;
2356 } else { 2417 } else {
2357 sinfo->filled |= STATION_INFO_TX_BITRATE; 2418 sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
2358 sinfo->txrate.legacy = rate * 5; 2419 sinfo->txrate.legacy = rate * 5;
2359 brcmf_dbg(CONN, "Rate %d Mbps\n", rate / 2); 2420 brcmf_dbg(CONN, "Rate %d Mbps\n", rate / 2);
2360 } 2421 }
@@ -2369,7 +2430,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
2369 goto done; 2430 goto done;
2370 } else { 2431 } else {
2371 rssi = le32_to_cpu(scb_val.val); 2432 rssi = le32_to_cpu(scb_val.val);
2372 sinfo->filled |= STATION_INFO_SIGNAL; 2433 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
2373 sinfo->signal = rssi; 2434 sinfo->signal = rssi;
2374 brcmf_dbg(CONN, "RSSI %d dBm\n", rssi); 2435 brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
2375 } 2436 }
@@ -2396,7 +2457,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
2396 brcmf_dbg(CONN, "DTIM peroid %d\n", 2457 brcmf_dbg(CONN, "DTIM peroid %d\n",
2397 dtim_period); 2458 dtim_period);
2398 } 2459 }
2399 sinfo->filled |= STATION_INFO_BSS_PARAM; 2460 sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
2400 } 2461 }
2401 } else 2462 } else
2402 err = -EPERM; 2463 err = -EPERM;
@@ -2999,7 +3060,7 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
2999 * disassociate from AP to save power while system is 3060 * disassociate from AP to save power while system is
3000 * in suspended state 3061 * in suspended state
3001 */ 3062 */
3002 brcmf_link_down(vif); 3063 brcmf_link_down(vif, WLAN_REASON_UNSPECIFIED);
3003 /* Make sure WPA_Supplicant receives all the event 3064 /* Make sure WPA_Supplicant receives all the event
3004 * generated due to DISASSOC call to the fw to keep 3065 * generated due to DISASSOC call to the fw to keep
3005 * the state fw and WPA_Supplicant state consistent 3066 * the state fw and WPA_Supplicant state consistent
@@ -3695,17 +3756,12 @@ static u32
3695brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd) 3756brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
3696{ 3757{
3697 3758
3698 __le32 iecount_le;
3699 __le32 pktflag_le;
3700
3701 strncpy(iebuf, add_del_cmd, VNDR_IE_CMD_LEN - 1); 3759 strncpy(iebuf, add_del_cmd, VNDR_IE_CMD_LEN - 1);
3702 iebuf[VNDR_IE_CMD_LEN - 1] = '\0'; 3760 iebuf[VNDR_IE_CMD_LEN - 1] = '\0';
3703 3761
3704 iecount_le = cpu_to_le32(1); 3762 put_unaligned_le32(1, &iebuf[VNDR_IE_COUNT_OFFSET]);
3705 memcpy(&iebuf[VNDR_IE_COUNT_OFFSET], &iecount_le, sizeof(iecount_le));
3706 3763
3707 pktflag_le = cpu_to_le32(pktflag); 3764 put_unaligned_le32(pktflag, &iebuf[VNDR_IE_PKTFLAG_OFFSET]);
3708 memcpy(&iebuf[VNDR_IE_PKTFLAG_OFFSET], &pktflag_le, sizeof(pktflag_le));
3709 3765
3710 memcpy(&iebuf[VNDR_IE_VSIE_OFFSET], ie_ptr, ie_len); 3766 memcpy(&iebuf[VNDR_IE_VSIE_OFFSET], ie_ptr, ie_len);
3711 3767
@@ -3924,6 +3980,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3924 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 3980 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
3925 struct brcmf_if *ifp = netdev_priv(ndev); 3981 struct brcmf_if *ifp = netdev_priv(ndev);
3926 const struct brcmf_tlv *ssid_ie; 3982 const struct brcmf_tlv *ssid_ie;
3983 const struct brcmf_tlv *country_ie;
3927 struct brcmf_ssid_le ssid_le; 3984 struct brcmf_ssid_le ssid_le;
3928 s32 err = -EPERM; 3985 s32 err = -EPERM;
3929 const struct brcmf_tlv *rsn_ie; 3986 const struct brcmf_tlv *rsn_ie;
@@ -3933,6 +3990,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3933 struct brcmf_fil_bss_enable_le bss_enable; 3990 struct brcmf_fil_bss_enable_le bss_enable;
3934 u16 chanspec; 3991 u16 chanspec;
3935 bool mbss; 3992 bool mbss;
3993 int is_11d;
3936 3994
3937 brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n", 3995 brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
3938 settings->chandef.chan->hw_value, 3996 settings->chandef.chan->hw_value,
@@ -3941,10 +3999,16 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
3941 brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n", 3999 brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n",
3942 settings->ssid, settings->ssid_len, settings->auth_type, 4000 settings->ssid, settings->ssid_len, settings->auth_type,
3943 settings->inactivity_timeout); 4001 settings->inactivity_timeout);
3944
3945 dev_role = ifp->vif->wdev.iftype; 4002 dev_role = ifp->vif->wdev.iftype;
3946 mbss = ifp->vif->mbss; 4003 mbss = ifp->vif->mbss;
3947 4004
4005 /* store current 11d setting */
4006 brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, &ifp->vif->is_11d);
4007 country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
4008 settings->beacon.tail_len,
4009 WLAN_EID_COUNTRY);
4010 is_11d = country_ie ? 1 : 0;
4011
3948 memset(&ssid_le, 0, sizeof(ssid_le)); 4012 memset(&ssid_le, 0, sizeof(ssid_le));
3949 if (settings->ssid == NULL || settings->ssid_len == 0) { 4013 if (settings->ssid == NULL || settings->ssid_len == 0) {
3950 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN; 4014 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
@@ -4010,6 +4074,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4010 goto exit; 4074 goto exit;
4011 } 4075 }
4012 4076
4077 if (is_11d != ifp->vif->is_11d) {
4078 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
4079 is_11d);
4080 if (err < 0) {
4081 brcmf_err("Regulatory Set Error, %d\n", err);
4082 goto exit;
4083 }
4084 }
4013 if (settings->beacon_interval) { 4085 if (settings->beacon_interval) {
4014 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD, 4086 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
4015 settings->beacon_interval); 4087 settings->beacon_interval);
@@ -4042,6 +4114,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4042 brcmf_err("SET INFRA error %d\n", err); 4114 brcmf_err("SET INFRA error %d\n", err);
4043 goto exit; 4115 goto exit;
4044 } 4116 }
4117 } else if (WARN_ON(is_11d != ifp->vif->is_11d)) {
4118 /* Multiple-BSS should use same 11d configuration */
4119 err = -EINVAL;
4120 goto exit;
4045 } 4121 }
4046 if (dev_role == NL80211_IFTYPE_AP) { 4122 if (dev_role == NL80211_IFTYPE_AP) {
4047 if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss)) 4123 if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss))
@@ -4057,6 +4133,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4057 brcmf_err("BRCMF_C_UP error (%d)\n", err); 4133 brcmf_err("BRCMF_C_UP error (%d)\n", err);
4058 goto exit; 4134 goto exit;
4059 } 4135 }
4136 /* On DOWN the firmware removes the WEP keys, reconfigure
4137 * them if they were set.
4138 */
4139 brcmf_cfg80211_reconfigure_wep(ifp);
4060 4140
4061 memset(&join_params, 0, sizeof(join_params)); 4141 memset(&join_params, 0, sizeof(join_params));
4062 /* join parameters starts with ssid */ 4142 /* join parameters starts with ssid */
@@ -4133,6 +4213,11 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
4133 brcmf_err("setting INFRA mode failed %d\n", err); 4213 brcmf_err("setting INFRA mode failed %d\n", err);
4134 if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) 4214 if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
4135 brcmf_fil_iovar_int_set(ifp, "mbss", 0); 4215 brcmf_fil_iovar_int_set(ifp, "mbss", 0);
4216 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
4217 ifp->vif->is_11d);
4218 if (err < 0)
4219 brcmf_err("restoring REGULATORY setting failed %d\n",
4220 err);
4136 /* Bring device back up so it can be used again */ 4221 /* Bring device back up so it can be used again */
4137 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1); 4222 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
4138 if (err < 0) 4223 if (err < 0)
@@ -4197,6 +4282,34 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
4197 return err; 4282 return err;
4198} 4283}
4199 4284
4285static int
4286brcmf_cfg80211_change_station(struct wiphy *wiphy, struct net_device *ndev,
4287 const u8 *mac, struct station_parameters *params)
4288{
4289 struct brcmf_if *ifp = netdev_priv(ndev);
4290 s32 err;
4291
4292 brcmf_dbg(TRACE, "Enter, MAC %pM, mask 0x%04x set 0x%04x\n", mac,
4293 params->sta_flags_mask, params->sta_flags_set);
4294
4295 /* Ignore all 00 MAC */
4296 if (is_zero_ether_addr(mac))
4297 return 0;
4298
4299 if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
4300 return 0;
4301
4302 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
4303 err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SCB_AUTHORIZE,
4304 (void *)mac, ETH_ALEN);
4305 else
4306 err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SCB_DEAUTHORIZE,
4307 (void *)mac, ETH_ALEN);
4308 if (err < 0)
4309 brcmf_err("Setting SCB (de-)authorize failed, %d\n", err);
4310
4311 return err;
4312}
4200 4313
4201static void 4314static void
4202brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy, 4315brcmf_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
@@ -4471,6 +4584,7 @@ static struct cfg80211_ops wl_cfg80211_ops = {
4471 .stop_ap = brcmf_cfg80211_stop_ap, 4584 .stop_ap = brcmf_cfg80211_stop_ap,
4472 .change_beacon = brcmf_cfg80211_change_beacon, 4585 .change_beacon = brcmf_cfg80211_change_beacon,
4473 .del_station = brcmf_cfg80211_del_station, 4586 .del_station = brcmf_cfg80211_del_station,
4587 .change_station = brcmf_cfg80211_change_station,
4474 .sched_scan_start = brcmf_cfg80211_sched_scan_start, 4588 .sched_scan_start = brcmf_cfg80211_sched_scan_start,
4475 .sched_scan_stop = brcmf_cfg80211_sched_scan_stop, 4589 .sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
4476 .mgmt_frame_register = brcmf_cfg80211_mgmt_frame_register, 4590 .mgmt_frame_register = brcmf_cfg80211_mgmt_frame_register,
@@ -4778,7 +4892,6 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
4778 if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) && 4892 if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
4779 (reason == BRCMF_E_STATUS_SUCCESS)) { 4893 (reason == BRCMF_E_STATUS_SUCCESS)) {
4780 memset(&sinfo, 0, sizeof(sinfo)); 4894 memset(&sinfo, 0, sizeof(sinfo));
4781 sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
4782 if (!data) { 4895 if (!data) {
4783 brcmf_err("No IEs present in ASSOC/REASSOC_IND"); 4896 brcmf_err("No IEs present in ASSOC/REASSOC_IND");
4784 return -EINVAL; 4897 return -EINVAL;
@@ -4833,7 +4946,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
4833 if (!brcmf_is_ibssmode(ifp->vif)) { 4946 if (!brcmf_is_ibssmode(ifp->vif)) {
4834 brcmf_bss_connect_done(cfg, ndev, e, false); 4947 brcmf_bss_connect_done(cfg, ndev, e, false);
4835 } 4948 }
4836 brcmf_link_down(ifp->vif); 4949 brcmf_link_down(ifp->vif, brcmf_map_fw_linkdown_reason(e));
4837 brcmf_init_prof(ndev_to_prof(ndev)); 4950 brcmf_init_prof(ndev_to_prof(ndev));
4838 if (ndev != cfg_to_ndev(cfg)) 4951 if (ndev != cfg_to_ndev(cfg))
4839 complete(&cfg->vif_disabled); 4952 complete(&cfg->vif_disabled);
@@ -5774,7 +5887,7 @@ static s32 __brcmf_cfg80211_down(struct brcmf_if *ifp)
5774 * from AP to save power 5887 * from AP to save power
5775 */ 5888 */
5776 if (check_vif_up(ifp->vif)) { 5889 if (check_vif_up(ifp->vif)) {
5777 brcmf_link_down(ifp->vif); 5890 brcmf_link_down(ifp->vif, WLAN_REASON_UNSPECIFIED);
5778 5891
5779 /* Make sure WPA_Supplicant receives all the event 5892 /* Make sure WPA_Supplicant receives all the event
5780 generated due to DISASSOC call to the fw to keep 5893 generated due to DISASSOC call to the fw to keep
@@ -5876,6 +5989,29 @@ int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
5876 vif_event_equals(event, action), timeout); 5989 vif_event_equals(event, action), timeout);
5877} 5990}
5878 5991
5992static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
5993 struct regulatory_request *req)
5994{
5995 struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
5996 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
5997 struct brcmf_fil_country_le ccreq;
5998 int i;
5999
6000 brcmf_dbg(TRACE, "enter: initiator=%d, alpha=%c%c\n", req->initiator,
6001 req->alpha2[0], req->alpha2[1]);
6002
6003 /* ignore non-ISO3166 country codes */
6004 for (i = 0; i < sizeof(req->alpha2); i++)
6005 if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
6006 brcmf_err("not a ISO3166 code\n");
6007 return;
6008 }
6009 memset(&ccreq, 0, sizeof(ccreq));
6010 ccreq.rev = cpu_to_le32(-1);
6011 memcpy(ccreq.ccode, req->alpha2, sizeof(req->alpha2));
6012 brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq));
6013}
6014
5879static void brcmf_free_wiphy(struct wiphy *wiphy) 6015static void brcmf_free_wiphy(struct wiphy *wiphy)
5880{ 6016{
5881 kfree(wiphy->iface_combinations); 6017 kfree(wiphy->iface_combinations);
@@ -5952,6 +6088,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
5952 goto priv_out; 6088 goto priv_out;
5953 6089
5954 brcmf_dbg(INFO, "Registering custom regulatory\n"); 6090 brcmf_dbg(INFO, "Registering custom regulatory\n");
6091 wiphy->reg_notifier = brcmf_cfg80211_reg_notifier;
5955 wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; 6092 wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
5956 wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom); 6093 wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
5957 6094
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
index 9e98b8d52757..d9e6d01b2b69 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
@@ -75,6 +75,8 @@
75 75
76#define BRCMF_VNDR_IE_P2PAF_SHIFT 12 76#define BRCMF_VNDR_IE_P2PAF_SHIFT 12
77 77
78#define BRCMF_MAX_DEFAULT_KEYS 4
79
78 80
79/** 81/**
80 * enum brcmf_scan_status - scan engine status 82 * enum brcmf_scan_status - scan engine status
@@ -125,11 +127,13 @@ struct brcmf_cfg80211_security {
125 * @ssid: ssid of associated/associating ap. 127 * @ssid: ssid of associated/associating ap.
126 * @bssid: bssid of joined/joining ibss. 128 * @bssid: bssid of joined/joining ibss.
127 * @sec: security information. 129 * @sec: security information.
130 * @key: key information
128 */ 131 */
129struct brcmf_cfg80211_profile { 132struct brcmf_cfg80211_profile {
130 struct brcmf_ssid ssid; 133 struct brcmf_ssid ssid;
131 u8 bssid[ETH_ALEN]; 134 u8 bssid[ETH_ALEN];
132 struct brcmf_cfg80211_security sec; 135 struct brcmf_cfg80211_security sec;
136 struct brcmf_wsec_key key[BRCMF_MAX_DEFAULT_KEYS];
133}; 137};
134 138
135/** 139/**
@@ -196,6 +200,7 @@ struct brcmf_cfg80211_vif {
196 struct list_head list; 200 struct list_head list;
197 u16 mgmt_rx_reg; 201 u16 mgmt_rx_reg;
198 bool mbss; 202 bool mbss;
203 int is_11d;
199}; 204};
200 205
201/* association inform */ 206/* association inform */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index ddae0b5e56ec..04d2ca0d87d6 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -101,14 +101,7 @@
101/* ARM Cortex M3 core, ID 0x82a */ 101/* ARM Cortex M3 core, ID 0x82a */
102#define BCM4329_CORE_ARM_BASE 0x18002000 102#define BCM4329_CORE_ARM_BASE 0x18002000
103#define BCM4329_RAMSIZE 0x48000 103#define BCM4329_RAMSIZE 0x48000
104
105/* bcm43143 */ 104/* bcm43143 */
106/* SDIO device core */
107#define BCM43143_CORE_BUS_BASE 0x18002000
108/* internal memory core */
109#define BCM43143_CORE_SOCRAM_BASE 0x18004000
110/* ARM Cortex M3 core, ID 0x82a */
111#define BCM43143_CORE_ARM_BASE 0x18003000
112#define BCM43143_RAMSIZE 0x70000 105#define BCM43143_RAMSIZE 0x70000
113 106
114#define CORE_SB(base, field) \ 107#define CORE_SB(base, field) \
@@ -164,13 +157,6 @@ struct brcmf_core_priv {
164 struct brcmf_chip_priv *chip; 157 struct brcmf_chip_priv *chip;
165}; 158};
166 159
167/* ARM CR4 core specific control flag bits */
168#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
169
170/* D11 core specific control flag bits */
171#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
172#define D11_BCMA_IOCTL_PHYRESET 0x0008
173
174struct brcmf_chip_priv { 160struct brcmf_chip_priv {
175 struct brcmf_chip pub; 161 struct brcmf_chip pub;
176 const struct brcmf_buscore_ops *ops; 162 const struct brcmf_buscore_ops *ops;
@@ -495,6 +481,7 @@ static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
495 ci->pub.ramsize = 0x48000; 481 ci->pub.ramsize = 0x48000;
496 break; 482 break;
497 case BRCM_CC_4334_CHIP_ID: 483 case BRCM_CC_4334_CHIP_ID:
484 case BRCM_CC_43340_CHIP_ID:
498 ci->pub.ramsize = 0x80000; 485 ci->pub.ramsize = 0x80000;
499 break; 486 break;
500 case BRCM_CC_4335_CHIP_ID: 487 case BRCM_CC_4335_CHIP_ID:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/common.c b/drivers/net/wireless/brcm80211/brcmfmac/common.c
index 1861a13e8d03..fe54844c75e0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/common.c
@@ -25,6 +25,9 @@
25#include "fwil.h" 25#include "fwil.h"
26#include "fwil_types.h" 26#include "fwil_types.h"
27#include "tracepoint.h" 27#include "tracepoint.h"
28#include "common.h"
29
30const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
28 31
29#define BRCMF_DEFAULT_BCN_TIMEOUT 3 32#define BRCMF_DEFAULT_BCN_TIMEOUT 3
30#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40 33#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40
@@ -38,6 +41,8 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
38 s8 eventmask[BRCMF_EVENTING_MASK_LEN]; 41 s8 eventmask[BRCMF_EVENTING_MASK_LEN];
39 u8 buf[BRCMF_DCMD_SMLEN]; 42 u8 buf[BRCMF_DCMD_SMLEN];
40 struct brcmf_join_pref_params join_pref_params[2]; 43 struct brcmf_join_pref_params join_pref_params[2];
44 struct brcmf_rev_info_le revinfo;
45 struct brcmf_rev_info *ri;
41 char *ptr; 46 char *ptr;
42 s32 err; 47 s32 err;
43 48
@@ -45,12 +50,37 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
45 err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr, 50 err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
46 sizeof(ifp->mac_addr)); 51 sizeof(ifp->mac_addr));
47 if (err < 0) { 52 if (err < 0) {
48 brcmf_err("Retreiving cur_etheraddr failed, %d\n", 53 brcmf_err("Retreiving cur_etheraddr failed, %d\n", err);
49 err);
50 goto done; 54 goto done;
51 } 55 }
52 memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac)); 56 memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
53 57
58 err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_REVINFO,
59 &revinfo, sizeof(revinfo));
60 ri = &ifp->drvr->revinfo;
61 if (err < 0) {
62 brcmf_err("retrieving revision info failed, %d\n", err);
63 } else {
64 ri->vendorid = le32_to_cpu(revinfo.vendorid);
65 ri->deviceid = le32_to_cpu(revinfo.deviceid);
66 ri->radiorev = le32_to_cpu(revinfo.radiorev);
67 ri->chiprev = le32_to_cpu(revinfo.chiprev);
68 ri->corerev = le32_to_cpu(revinfo.corerev);
69 ri->boardid = le32_to_cpu(revinfo.boardid);
70 ri->boardvendor = le32_to_cpu(revinfo.boardvendor);
71 ri->boardrev = le32_to_cpu(revinfo.boardrev);
72 ri->driverrev = le32_to_cpu(revinfo.driverrev);
73 ri->ucoderev = le32_to_cpu(revinfo.ucoderev);
74 ri->bus = le32_to_cpu(revinfo.bus);
75 ri->chipnum = le32_to_cpu(revinfo.chipnum);
76 ri->phytype = le32_to_cpu(revinfo.phytype);
77 ri->phyrev = le32_to_cpu(revinfo.phyrev);
78 ri->anarev = le32_to_cpu(revinfo.anarev);
79 ri->chippkg = le32_to_cpu(revinfo.chippkg);
80 ri->nvramrev = le32_to_cpu(revinfo.nvramrev);
81 }
82 ri->result = err;
83
54 /* query for 'ver' to get version info from firmware */ 84 /* query for 'ver' to get version info from firmware */
55 memset(buf, 0, sizeof(buf)); 85 memset(buf, 0, sizeof(buf));
56 strcpy(buf, "ver"); 86 strcpy(buf, "ver");
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform_msm.h b/drivers/net/wireless/brcm80211/brcmfmac/common.h
index 2f2229edb498..0d39d80cee28 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform_msm.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/common.h
@@ -1,5 +1,4 @@
1/* 1/* Copyright (c) 2014 Broadcom Corporation
2 * Copyright (c) 2014 Qualcomm Atheros, Inc.
3 * 2 *
4 * Permission to use, copy, modify, and/or distribute this software for any 3 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 4 * purpose with or without fee is hereby granted, provided that the above
@@ -7,18 +6,15 @@
7 * 6 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 10 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 12 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 13 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 14 */
15#ifndef BRCMFMAC_COMMON_H
16#define BRCMFMAC_COMMON_H
16 17
17#ifndef __WIL_PLATFORM__MSM_H__ 18extern const u8 ALLFFMAC[ETH_ALEN];
18#define __WIL_PLATFORM_MSM_H__
19 19
20#include "wil_platform.h" 20#endif /* BRCMFMAC_COMMON_H */
21
22void *wil_platform_msm_init(struct device *dev, struct wil_platform_ops *ops);
23
24#endif /* __WIL_PLATFORM__MSM_H__ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.h b/drivers/net/wireless/brcm80211/brcmfmac/commonring.h
index 002336e35764..3d404016a92e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/commonring.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.h
@@ -37,6 +37,8 @@ struct brcmf_commonring {
37 unsigned long flags; 37 unsigned long flags;
38 bool inited; 38 bool inited;
39 bool was_full; 39 bool was_full;
40
41 atomic_t outstanding_tx;
40}; 42};
41 43
42 44
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c
index effe6d7831d9..2d6e2cc1b12c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c
@@ -197,7 +197,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
197 brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx); 197 brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
198 198
199 /* Can the device send data? */ 199 /* Can the device send data? */
200 if (drvr->bus_if->state != BRCMF_BUS_DATA) { 200 if (drvr->bus_if->state != BRCMF_BUS_UP) {
201 brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state); 201 brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
202 netif_stop_queue(ndev); 202 netif_stop_queue(ndev);
203 dev_kfree_skb(skb); 203 dev_kfree_skb(skb);
@@ -601,9 +601,12 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
601{ 601{
602 struct brcmf_if *ifp = netdev_priv(ndev); 602 struct brcmf_if *ifp = netdev_priv(ndev);
603 struct brcmf_pub *drvr = ifp->drvr; 603 struct brcmf_pub *drvr = ifp->drvr;
604 char drev[BRCMU_DOTREV_LEN] = "n/a";
604 605
606 if (drvr->revinfo.result == 0)
607 brcmu_dotrev_str(drvr->revinfo.driverrev, drev);
605 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 608 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
606 snprintf(info->version, sizeof(info->version), "n/a"); 609 strlcpy(info->version, drev, sizeof(info->version));
607 strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version)); 610 strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
608 strlcpy(info->bus_info, dev_name(drvr->bus_if->dev), 611 strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
609 sizeof(info->bus_info)); 612 sizeof(info->bus_info));
@@ -637,7 +640,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
637 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx); 640 brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
638 641
639 /* If bus is not ready, can't continue */ 642 /* If bus is not ready, can't continue */
640 if (bus_if->state != BRCMF_BUS_DATA) { 643 if (bus_if->state != BRCMF_BUS_UP) {
641 brcmf_err("failed bus is not ready\n"); 644 brcmf_err("failed bus is not ready\n");
642 return -EAGAIN; 645 return -EAGAIN;
643 } 646 }
@@ -964,13 +967,20 @@ int brcmf_bus_start(struct device *dev)
964 p2p_ifp = NULL; 967 p2p_ifp = NULL;
965 968
966 /* signal bus ready */ 969 /* signal bus ready */
967 brcmf_bus_change_state(bus_if, BRCMF_BUS_DATA); 970 brcmf_bus_change_state(bus_if, BRCMF_BUS_UP);
968 971
969 /* Bus is ready, do any initialization */ 972 /* Bus is ready, do any initialization */
970 ret = brcmf_c_preinit_dcmds(ifp); 973 ret = brcmf_c_preinit_dcmds(ifp);
971 if (ret < 0) 974 if (ret < 0)
972 goto fail; 975 goto fail;
973 976
977 /* assure we have chipid before feature attach */
978 if (!bus_if->chip) {
979 bus_if->chip = drvr->revinfo.chipnum;
980 bus_if->chiprev = drvr->revinfo.chiprev;
981 brcmf_dbg(INFO, "firmware revinfo: chip %x (%d) rev %d\n",
982 bus_if->chip, bus_if->chip, bus_if->chiprev);
983 }
974 brcmf_feat_attach(drvr); 984 brcmf_feat_attach(drvr);
975 985
976 ret = brcmf_fws_init(drvr); 986 ret = brcmf_fws_init(drvr);
@@ -1093,9 +1103,8 @@ static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
1093 return atomic_read(&ifp->pend_8021x_cnt); 1103 return atomic_read(&ifp->pend_8021x_cnt);
1094} 1104}
1095 1105
1096int brcmf_netdev_wait_pend8021x(struct net_device *ndev) 1106int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
1097{ 1107{
1098 struct brcmf_if *ifp = netdev_priv(ndev);
1099 int err; 1108 int err;
1100 1109
1101 err = wait_event_timeout(ifp->pend_8021x_wait, 1110 err = wait_event_timeout(ifp->pend_8021x_wait,
@@ -1107,6 +1116,27 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
1107 return !err; 1116 return !err;
1108} 1117}
1109 1118
1119void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state)
1120{
1121 struct brcmf_pub *drvr = bus->drvr;
1122 struct net_device *ndev;
1123 int ifidx;
1124
1125 brcmf_dbg(TRACE, "%d -> %d\n", bus->state, state);
1126 bus->state = state;
1127
1128 if (state == BRCMF_BUS_UP) {
1129 for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
1130 if ((drvr->iflist[ifidx]) &&
1131 (drvr->iflist[ifidx]->ndev)) {
1132 ndev = drvr->iflist[ifidx]->ndev;
1133 if (netif_queue_stopped(ndev))
1134 netif_wake_queue(ndev);
1135 }
1136 }
1137 }
1138}
1139
1110static void brcmf_driver_register(struct work_struct *work) 1140static void brcmf_driver_register(struct work_struct *work)
1111{ 1141{
1112#ifdef CONFIG_BRCMFMAC_SDIO 1142#ifdef CONFIG_BRCMFMAC_SDIO
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h
index 23f74b139cc8..fd74a9c6e9ac 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h
@@ -29,8 +29,6 @@
29/* For supporting multiple interfaces */ 29/* For supporting multiple interfaces */
30#define BRCMF_MAX_IFS 16 30#define BRCMF_MAX_IFS 16
31 31
32#define DOT11_MAX_DEFAULT_KEYS 4
33
34/* Small, medium and maximum buffer size for dcmd 32/* Small, medium and maximum buffer size for dcmd
35 */ 33 */
36#define BRCMF_DCMD_SMLEN 256 34#define BRCMF_DCMD_SMLEN 256
@@ -73,6 +71,35 @@ struct brcmf_proto; /* device communication protocol info */
73struct brcmf_cfg80211_dev; /* cfg80211 device info */ 71struct brcmf_cfg80211_dev; /* cfg80211 device info */
74struct brcmf_fws_info; /* firmware signalling info */ 72struct brcmf_fws_info; /* firmware signalling info */
75 73
74/*
75 * struct brcmf_rev_info
76 *
77 * The result field stores the error code of the
78 * revision info request from firmware. For the
79 * other fields see struct brcmf_rev_info_le in
80 * fwil_types.h
81 */
82struct brcmf_rev_info {
83 int result;
84 u32 vendorid;
85 u32 deviceid;
86 u32 radiorev;
87 u32 chiprev;
88 u32 corerev;
89 u32 boardid;
90 u32 boardvendor;
91 u32 boardrev;
92 u32 driverrev;
93 u32 ucoderev;
94 u32 bus;
95 u32 chipnum;
96 u32 phytype;
97 u32 phyrev;
98 u32 anarev;
99 u32 chippkg;
100 u32 nvramrev;
101};
102
76/* Common structure for module and instance linkage */ 103/* Common structure for module and instance linkage */
77struct brcmf_pub { 104struct brcmf_pub {
78 /* Linkage ponters */ 105 /* Linkage ponters */
@@ -106,6 +133,7 @@ struct brcmf_pub {
106 u32 feat_flags; 133 u32 feat_flags;
107 u32 chip_quirks; 134 u32 chip_quirks;
108 135
136 struct brcmf_rev_info revinfo;
109#ifdef DEBUG 137#ifdef DEBUG
110 struct dentry *dbgfs_dir; 138 struct dentry *dbgfs_dir;
111#endif 139#endif
@@ -167,7 +195,7 @@ struct brcmf_skb_reorder_data {
167 u8 *reorder; 195 u8 *reorder;
168}; 196};
169 197
170int brcmf_netdev_wait_pend8021x(struct net_device *ndev); 198int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
171 199
172/* Return pointer to interface name */ 200/* Return pointer to interface name */
173char *brcmf_ifname(struct brcmf_pub *drvr, int idx); 201char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
index 1ff787d1a36b..9cb99152ad17 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -103,7 +103,11 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
103 103
104 c = nvp->fwnv->data[nvp->pos]; 104 c = nvp->fwnv->data[nvp->pos];
105 if (c == '=') { 105 if (c == '=') {
106 st = VALUE; 106 /* ignore RAW1 by treating as comment */
107 if (strncmp(&nvp->fwnv->data[nvp->entry], "RAW1", 4) == 0)
108 st = COMMENT;
109 else
110 st = VALUE;
107 } else if (!is_nvram_char(c)) { 111 } else if (!is_nvram_char(c)) {
108 brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n", 112 brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
109 nvp->line, nvp->column); 113 nvp->line, nvp->column);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
index 44f3a84d1999..910fbb561469 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
@@ -25,6 +25,7 @@
25#include "proto.h" 25#include "proto.h"
26#include "flowring.h" 26#include "flowring.h"
27#include "msgbuf.h" 27#include "msgbuf.h"
28#include "common.h"
28 29
29 30
30#define BRCMF_FLOWRING_HIGH 1024 31#define BRCMF_FLOWRING_HIGH 1024
@@ -34,9 +35,6 @@
34#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16) 35#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16)
35#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16) 36#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
36 37
37static const u8 ALLZEROMAC[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
38static const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
39
40static const u8 brcmf_flowring_prio2fifo[] = { 38static const u8 brcmf_flowring_prio2fifo[] = {
41 1, 39 1,
42 0, 40 0,
@@ -137,7 +135,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
137 hash = flow->hash; 135 hash = flow->hash;
138 for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) { 136 for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
139 if ((hash[hash_idx].ifidx == BRCMF_FLOWRING_INVALID_IFIDX) && 137 if ((hash[hash_idx].ifidx == BRCMF_FLOWRING_INVALID_IFIDX) &&
140 (memcmp(hash[hash_idx].mac, ALLZEROMAC, ETH_ALEN) == 0)) { 138 (is_zero_ether_addr(hash[hash_idx].mac))) {
141 found = true; 139 found = true;
142 break; 140 break;
143 } 141 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index 03f2c406a17b..dcfa0bb149ce 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -109,7 +109,7 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
109 struct brcmf_pub *drvr = ifp->drvr; 109 struct brcmf_pub *drvr = ifp->drvr;
110 s32 err; 110 s32 err;
111 111
112 if (drvr->bus_if->state != BRCMF_BUS_DATA) { 112 if (drvr->bus_if->state != BRCMF_BUS_UP) {
113 brcmf_err("bus is down. we have nothing to do.\n"); 113 brcmf_err("bus is down. we have nothing to do.\n");
114 return -EIO; 114 return -EIO;
115 } 115 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
index a30be683f4a1..5434dcf64f7d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -43,6 +43,8 @@
43#define BRCMF_C_SET_RADIO 38 43#define BRCMF_C_SET_RADIO 38
44#define BRCMF_C_GET_PHYTYPE 39 44#define BRCMF_C_GET_PHYTYPE 39
45#define BRCMF_C_SET_KEY 45 45#define BRCMF_C_SET_KEY 45
46#define BRCMF_C_GET_REGULATORY 46
47#define BRCMF_C_SET_REGULATORY 47
46#define BRCMF_C_SET_PASSIVE_SCAN 49 48#define BRCMF_C_SET_PASSIVE_SCAN 49
47#define BRCMF_C_SCAN 50 49#define BRCMF_C_SCAN 50
48#define BRCMF_C_SCAN_RESULTS 51 50#define BRCMF_C_SCAN_RESULTS 51
@@ -57,9 +59,12 @@
57#define BRCMF_C_SET_COUNTRY 84 59#define BRCMF_C_SET_COUNTRY 84
58#define BRCMF_C_GET_PM 85 60#define BRCMF_C_GET_PM 85
59#define BRCMF_C_SET_PM 86 61#define BRCMF_C_SET_PM 86
62#define BRCMF_C_GET_REVINFO 98
60#define BRCMF_C_GET_CURR_RATESET 114 63#define BRCMF_C_GET_CURR_RATESET 114
61#define BRCMF_C_GET_AP 117 64#define BRCMF_C_GET_AP 117
62#define BRCMF_C_SET_AP 118 65#define BRCMF_C_SET_AP 118
66#define BRCMF_C_SET_SCB_AUTHORIZE 121
67#define BRCMF_C_SET_SCB_DEAUTHORIZE 122
63#define BRCMF_C_GET_RSSI 127 68#define BRCMF_C_GET_RSSI 127
64#define BRCMF_C_GET_WSEC 133 69#define BRCMF_C_GET_WSEC 133
65#define BRCMF_C_SET_WSEC 134 70#define BRCMF_C_SET_WSEC 134
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 50891c02c4c1..374920965108 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -112,6 +112,7 @@
112#define BRCMF_WOWL_MAXPATTERNS 8 112#define BRCMF_WOWL_MAXPATTERNS 8
113#define BRCMF_WOWL_MAXPATTERNSIZE 128 113#define BRCMF_WOWL_MAXPATTERNSIZE 128
114 114
115#define BRCMF_COUNTRY_BUF_SZ 4
115 116
116/* join preference types for join_pref iovar */ 117/* join preference types for join_pref iovar */
117enum brcmf_join_pref_types { 118enum brcmf_join_pref_types {
@@ -525,4 +526,58 @@ struct brcmf_mbss_ssid_le {
525 unsigned char SSID[32]; 526 unsigned char SSID[32];
526}; 527};
527 528
529/**
530 * struct brcmf_fil_country_le - country configuration structure.
531 *
532 * @country_abbrev: null-terminated country code used in the country IE.
533 * @rev: revision specifier for ccode. on set, -1 indicates unspecified.
534 * @ccode: null-terminated built-in country code.
535 */
536struct brcmf_fil_country_le {
537 char country_abbrev[BRCMF_COUNTRY_BUF_SZ];
538 __le32 rev;
539 char ccode[BRCMF_COUNTRY_BUF_SZ];
540};
541
542/**
543 * struct brcmf_rev_info_le - device revision info.
544 *
545 * @vendorid: PCI vendor id.
546 * @deviceid: device id of chip.
547 * @radiorev: radio revision.
548 * @chiprev: chip revision.
549 * @corerev: core revision.
550 * @boardid: board identifier (usu. PCI sub-device id).
551 * @boardvendor: board vendor (usu. PCI sub-vendor id).
552 * @boardrev: board revision.
553 * @driverrev: driver version.
554 * @ucoderev: microcode version.
555 * @bus: bus type.
556 * @chipnum: chip number.
557 * @phytype: phy type.
558 * @phyrev: phy revision.
559 * @anarev: anacore rev.
560 * @chippkg: chip package info.
561 * @nvramrev: nvram revision number.
562 */
563struct brcmf_rev_info_le {
564 __le32 vendorid;
565 __le32 deviceid;
566 __le32 radiorev;
567 __le32 chiprev;
568 __le32 corerev;
569 __le32 boardid;
570 __le32 boardvendor;
571 __le32 boardrev;
572 __le32 driverrev;
573 __le32 ucoderev;
574 __le32 bus;
575 __le32 chipnum;
576 __le32 phytype;
577 __le32 phyrev;
578 __le32 anarev;
579 __le32 chippkg;
580 __le32 nvramrev;
581};
582
528#endif /* FWIL_TYPES_H_ */ 583#endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 456944a6a2db..6262612dec45 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -73,6 +73,8 @@
73#define BRCMF_MSGBUF_TX_FLUSH_CNT1 32 73#define BRCMF_MSGBUF_TX_FLUSH_CNT1 32
74#define BRCMF_MSGBUF_TX_FLUSH_CNT2 96 74#define BRCMF_MSGBUF_TX_FLUSH_CNT2 96
75 75
76#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 64
77#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32
76 78
77struct msgbuf_common_hdr { 79struct msgbuf_common_hdr {
78 u8 msgtype; 80 u8 msgtype;
@@ -583,7 +585,7 @@ brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
583 u32 flowid; 585 u32 flowid;
584 void *dma_buf; 586 void *dma_buf;
585 u32 dma_sz; 587 u32 dma_sz;
586 long long address; 588 u64 address;
587 int err; 589 int err;
588 590
589 flowid = work->flowid; 591 flowid = work->flowid;
@@ -620,7 +622,7 @@ brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
620 BRCMF_NROF_H2D_COMMON_MSGRINGS); 622 BRCMF_NROF_H2D_COMMON_MSGRINGS);
621 memcpy(create->sa, work->sa, ETH_ALEN); 623 memcpy(create->sa, work->sa, ETH_ALEN);
622 memcpy(create->da, work->da, ETH_ALEN); 624 memcpy(create->da, work->da, ETH_ALEN);
623 address = (long long)(long)msgbuf->flowring_dma_handle[flowid]; 625 address = (u64)msgbuf->flowring_dma_handle[flowid];
624 create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32); 626 create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
625 create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff); 627 create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
626 create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM); 628 create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
@@ -698,7 +700,7 @@ static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
698 dma_addr_t physaddr; 700 dma_addr_t physaddr;
699 u32 pktid; 701 u32 pktid;
700 struct msgbuf_tx_msghdr *tx_msghdr; 702 struct msgbuf_tx_msghdr *tx_msghdr;
701 long long address; 703 u64 address;
702 704
703 commonring = msgbuf->flowrings[flowid]; 705 commonring = msgbuf->flowrings[flowid];
704 if (!brcmf_commonring_write_available(commonring)) 706 if (!brcmf_commonring_write_available(commonring))
@@ -742,13 +744,14 @@ static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
742 tx_msghdr->seg_cnt = 1; 744 tx_msghdr->seg_cnt = 1;
743 memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN); 745 memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
744 tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN); 746 tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
745 address = (long long)(long)physaddr; 747 address = (u64)physaddr;
746 tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32); 748 tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
747 tx_msghdr->data_buf_addr.low_addr = 749 tx_msghdr->data_buf_addr.low_addr =
748 cpu_to_le32(address & 0xffffffff); 750 cpu_to_le32(address & 0xffffffff);
749 tx_msghdr->metadata_buf_len = 0; 751 tx_msghdr->metadata_buf_len = 0;
750 tx_msghdr->metadata_buf_addr.high_addr = 0; 752 tx_msghdr->metadata_buf_addr.high_addr = 0;
751 tx_msghdr->metadata_buf_addr.low_addr = 0; 753 tx_msghdr->metadata_buf_addr.low_addr = 0;
754 atomic_inc(&commonring->outstanding_tx);
752 if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) { 755 if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
753 brcmf_commonring_write_complete(commonring); 756 brcmf_commonring_write_complete(commonring);
754 count = 0; 757 count = 0;
@@ -773,10 +776,16 @@ static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
773} 776}
774 777
775 778
776static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid) 779static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
780 bool force)
777{ 781{
782 struct brcmf_commonring *commonring;
783
778 set_bit(flowid, msgbuf->flow_map); 784 set_bit(flowid, msgbuf->flow_map);
779 queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work); 785 commonring = msgbuf->flowrings[flowid];
786 if ((force) || (atomic_read(&commonring->outstanding_tx) <
787 BRCMF_MSGBUF_DELAY_TXWORKER_THRS))
788 queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);
780 789
781 return 0; 790 return 0;
782} 791}
@@ -797,7 +806,7 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
797 return -ENOMEM; 806 return -ENOMEM;
798 } 807 }
799 brcmf_flowring_enqueue(flow, flowid, skb); 808 brcmf_flowring_enqueue(flow, flowid, skb);
800 brcmf_msgbuf_schedule_txdata(msgbuf, flowid); 809 brcmf_msgbuf_schedule_txdata(msgbuf, flowid, false);
801 810
802 return 0; 811 return 0;
803} 812}
@@ -854,6 +863,7 @@ brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
854static void 863static void
855brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf) 864brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
856{ 865{
866 struct brcmf_commonring *commonring;
857 struct msgbuf_tx_status *tx_status; 867 struct msgbuf_tx_status *tx_status;
858 u32 idx; 868 u32 idx;
859 struct sk_buff *skb; 869 struct sk_buff *skb;
@@ -871,6 +881,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
871 } 881 }
872 882
873 set_bit(flowid, msgbuf->txstatus_done_map); 883 set_bit(flowid, msgbuf->txstatus_done_map);
884 commonring = msgbuf->flowrings[flowid];
885 atomic_dec(&commonring->outstanding_tx);
874 886
875 brcmf_txfinalize(msgbuf->drvr, skb, tx_status->msg.ifidx, true); 887 brcmf_txfinalize(msgbuf->drvr, skb, tx_status->msg.ifidx, true);
876} 888}
@@ -885,7 +897,7 @@ static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
885 u32 pktlen; 897 u32 pktlen;
886 dma_addr_t physaddr; 898 dma_addr_t physaddr;
887 struct msgbuf_rx_bufpost *rx_bufpost; 899 struct msgbuf_rx_bufpost *rx_bufpost;
888 long long address; 900 u64 address;
889 u32 pktid; 901 u32 pktid;
890 u32 i; 902 u32 i;
891 903
@@ -894,7 +906,7 @@ static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
894 count, 906 count,
895 &alloced); 907 &alloced);
896 if (!ret_ptr) { 908 if (!ret_ptr) {
897 brcmf_err("Failed to reserve space in commonring\n"); 909 brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n");
898 return 0; 910 return 0;
899 } 911 }
900 912
@@ -921,7 +933,7 @@ static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
921 } 933 }
922 934
923 if (msgbuf->rx_metadata_offset) { 935 if (msgbuf->rx_metadata_offset) {
924 address = (long long)(long)physaddr; 936 address = (u64)physaddr;
925 rx_bufpost->metadata_buf_len = 937 rx_bufpost->metadata_buf_len =
926 cpu_to_le16(msgbuf->rx_metadata_offset); 938 cpu_to_le16(msgbuf->rx_metadata_offset);
927 rx_bufpost->metadata_buf_addr.high_addr = 939 rx_bufpost->metadata_buf_addr.high_addr =
@@ -936,7 +948,7 @@ static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
936 rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST; 948 rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
937 rx_bufpost->msg.request_id = cpu_to_le32(pktid); 949 rx_bufpost->msg.request_id = cpu_to_le32(pktid);
938 950
939 address = (long long)(long)physaddr; 951 address = (u64)physaddr;
940 rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen); 952 rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
941 rx_bufpost->data_buf_addr.high_addr = 953 rx_bufpost->data_buf_addr.high_addr =
942 cpu_to_le32(address >> 32); 954 cpu_to_le32(address >> 32);
@@ -992,7 +1004,7 @@ brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
992 u32 pktlen; 1004 u32 pktlen;
993 dma_addr_t physaddr; 1005 dma_addr_t physaddr;
994 struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost; 1006 struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
995 long long address; 1007 u64 address;
996 u32 pktid; 1008 u32 pktid;
997 u32 i; 1009 u32 i;
998 1010
@@ -1035,7 +1047,7 @@ brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
1035 MSGBUF_TYPE_IOCTLRESP_BUF_POST; 1047 MSGBUF_TYPE_IOCTLRESP_BUF_POST;
1036 rx_bufpost->msg.request_id = cpu_to_le32(pktid); 1048 rx_bufpost->msg.request_id = cpu_to_le32(pktid);
1037 1049
1038 address = (long long)(long)physaddr; 1050 address = (u64)physaddr;
1039 rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen); 1051 rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
1040 rx_bufpost->host_buf_addr.high_addr = 1052 rx_bufpost->host_buf_addr.high_addr =
1041 cpu_to_le32(address >> 32); 1053 cpu_to_le32(address >> 32);
@@ -1181,7 +1193,7 @@ brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
1181 1193
1182 brcmf_flowring_open(msgbuf->flow, flowid); 1194 brcmf_flowring_open(msgbuf->flow, flowid);
1183 1195
1184 brcmf_msgbuf_schedule_txdata(msgbuf, flowid); 1196 brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
1185} 1197}
1186 1198
1187 1199
@@ -1280,8 +1292,10 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
1280 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 1292 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1281 struct brcmf_pub *drvr = bus_if->drvr; 1293 struct brcmf_pub *drvr = bus_if->drvr;
1282 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 1294 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1295 struct brcmf_commonring *commonring;
1283 void *buf; 1296 void *buf;
1284 u32 flowid; 1297 u32 flowid;
1298 int qlen;
1285 1299
1286 buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE]; 1300 buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
1287 brcmf_msgbuf_process_rx(msgbuf, buf); 1301 brcmf_msgbuf_process_rx(msgbuf, buf);
@@ -1293,8 +1307,12 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
1293 for_each_set_bit(flowid, msgbuf->txstatus_done_map, 1307 for_each_set_bit(flowid, msgbuf->txstatus_done_map,
1294 msgbuf->nrof_flowrings) { 1308 msgbuf->nrof_flowrings) {
1295 clear_bit(flowid, msgbuf->txstatus_done_map); 1309 clear_bit(flowid, msgbuf->txstatus_done_map);
1296 if (brcmf_flowring_qlen(msgbuf->flow, flowid)) 1310 commonring = msgbuf->flowrings[flowid];
1297 brcmf_msgbuf_schedule_txdata(msgbuf, flowid); 1311 qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
1312 if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
1313 ((qlen) && (atomic_read(&commonring->outstanding_tx) <
1314 BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)))
1315 brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
1298 } 1316 }
1299 1317
1300 return 0; 1318 return 0;
@@ -1348,7 +1366,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
1348{ 1366{
1349 struct brcmf_bus_msgbuf *if_msgbuf; 1367 struct brcmf_bus_msgbuf *if_msgbuf;
1350 struct brcmf_msgbuf *msgbuf; 1368 struct brcmf_msgbuf *msgbuf;
1351 long long address; 1369 u64 address;
1352 u32 count; 1370 u32 count;
1353 1371
1354 if_msgbuf = drvr->bus_if->msgbuf; 1372 if_msgbuf = drvr->bus_if->msgbuf;
@@ -1379,7 +1397,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
1379 GFP_KERNEL); 1397 GFP_KERNEL);
1380 if (!msgbuf->ioctbuf) 1398 if (!msgbuf->ioctbuf)
1381 goto fail; 1399 goto fail;
1382 address = (long long)(long)msgbuf->ioctbuf_handle; 1400 address = (u64)msgbuf->ioctbuf_handle;
1383 msgbuf->ioctbuf_phys_hi = address >> 32; 1401 msgbuf->ioctbuf_phys_hi = address >> 32;
1384 msgbuf->ioctbuf_phys_lo = address & 0xffffffff; 1402 msgbuf->ioctbuf_phys_lo = address & 0xffffffff;
1385 1403
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 905991fdb7b1..61c053a729be 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -959,14 +959,14 @@ brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
959 dma_addr_t *dma_handle) 959 dma_addr_t *dma_handle)
960{ 960{
961 void *ring; 961 void *ring;
962 long long address; 962 u64 address;
963 963
964 ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle, 964 ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
965 GFP_KERNEL); 965 GFP_KERNEL);
966 if (!ring) 966 if (!ring)
967 return NULL; 967 return NULL;
968 968
969 address = (long long)(long)*dma_handle; 969 address = (u64)*dma_handle;
970 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr, 970 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
971 address & 0xffffffff); 971 address & 0xffffffff);
972 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32); 972 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
@@ -1166,7 +1166,7 @@ brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1166 1166
1167static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) 1167static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1168{ 1168{
1169 long long address; 1169 u64 address;
1170 u32 addr; 1170 u32 addr;
1171 1171
1172 devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev, 1172 devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
@@ -1180,7 +1180,7 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1180 1180
1181 addr = devinfo->shared.tcm_base_address + 1181 addr = devinfo->shared.tcm_base_address +
1182 BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET; 1182 BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
1183 address = (long long)(long)devinfo->shared.scratch_dmahandle; 1183 address = (u64)devinfo->shared.scratch_dmahandle;
1184 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); 1184 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1185 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); 1185 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1186 addr = devinfo->shared.tcm_base_address + 1186 addr = devinfo->shared.tcm_base_address +
@@ -1198,7 +1198,7 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1198 1198
1199 addr = devinfo->shared.tcm_base_address + 1199 addr = devinfo->shared.tcm_base_address +
1200 BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET; 1200 BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
1201 address = (long long)(long)devinfo->shared.ringupd_dmahandle; 1201 address = (u64)devinfo->shared.ringupd_dmahandle;
1202 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); 1202 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1203 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); 1203 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1204 addr = devinfo->shared.tcm_base_address + 1204 addr = devinfo->shared.tcm_base_address +
@@ -1828,7 +1828,7 @@ static int brcmf_pcie_resume(struct pci_dev *pdev)
1828 goto cleanup; 1828 goto cleanup;
1829 brcmf_dbg(PCIE, "Hot resume, continue....\n"); 1829 brcmf_dbg(PCIE, "Hot resume, continue....\n");
1830 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); 1830 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
1831 brcmf_bus_change_state(bus, BRCMF_BUS_DATA); 1831 brcmf_bus_change_state(bus, BRCMF_BUS_UP);
1832 brcmf_pcie_intr_enable(devinfo); 1832 brcmf_pcie_intr_enable(devinfo);
1833 return 0; 1833 return 0;
1834 } 1834 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index 0b0d51a61060..faec35c899ec 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -44,7 +44,8 @@
44#include "chip.h" 44#include "chip.h"
45#include "firmware.h" 45#include "firmware.h"
46 46
47#define DCMD_RESP_TIMEOUT 2000 /* In milli second */ 47#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
48#define CTL_DONE_TIMEOUT 2000 /* In milli second */
48 49
49#ifdef DEBUG 50#ifdef DEBUG
50 51
@@ -495,9 +496,9 @@ struct brcmf_sdio {
495 u8 *ctrl_frame_buf; 496 u8 *ctrl_frame_buf;
496 u16 ctrl_frame_len; 497 u16 ctrl_frame_len;
497 bool ctrl_frame_stat; 498 bool ctrl_frame_stat;
499 int ctrl_frame_err;
498 500
499 spinlock_t txq_lock; /* protect bus->txq */ 501 spinlock_t txq_lock; /* protect bus->txq */
500 struct semaphore tx_seq_lock; /* protect bus->tx_seq */
501 wait_queue_head_t ctrl_wait; 502 wait_queue_head_t ctrl_wait;
502 wait_queue_head_t dcmd_resp_wait; 503 wait_queue_head_t dcmd_resp_wait;
503 504
@@ -514,7 +515,6 @@ struct brcmf_sdio {
514 bool txoff; /* Transmit flow-controlled */ 515 bool txoff; /* Transmit flow-controlled */
515 struct brcmf_sdio_count sdcnt; 516 struct brcmf_sdio_count sdcnt;
516 bool sr_enabled; /* SaveRestore enabled */ 517 bool sr_enabled; /* SaveRestore enabled */
517 bool sleeping; /* SDIO bus sleeping */
518 518
519 u8 tx_hdrlen; /* sdio bus header length for tx packet */ 519 u8 tx_hdrlen; /* sdio bus header length for tx packet */
520 bool txglom; /* host tx glomming enable flag */ 520 bool txglom; /* host tx glomming enable flag */
@@ -608,6 +608,8 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
608#define BCM4330_NVRAM_NAME "brcm/brcmfmac4330-sdio.txt" 608#define BCM4330_NVRAM_NAME "brcm/brcmfmac4330-sdio.txt"
609#define BCM4334_FIRMWARE_NAME "brcm/brcmfmac4334-sdio.bin" 609#define BCM4334_FIRMWARE_NAME "brcm/brcmfmac4334-sdio.bin"
610#define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt" 610#define BCM4334_NVRAM_NAME "brcm/brcmfmac4334-sdio.txt"
611#define BCM43340_FIRMWARE_NAME "brcm/brcmfmac43340-sdio.bin"
612#define BCM43340_NVRAM_NAME "brcm/brcmfmac43340-sdio.txt"
611#define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin" 613#define BCM4335_FIRMWARE_NAME "brcm/brcmfmac4335-sdio.bin"
612#define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt" 614#define BCM4335_NVRAM_NAME "brcm/brcmfmac4335-sdio.txt"
613#define BCM43362_FIRMWARE_NAME "brcm/brcmfmac43362-sdio.bin" 615#define BCM43362_FIRMWARE_NAME "brcm/brcmfmac43362-sdio.bin"
@@ -629,6 +631,8 @@ MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
629MODULE_FIRMWARE(BCM4330_NVRAM_NAME); 631MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
630MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME); 632MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
631MODULE_FIRMWARE(BCM4334_NVRAM_NAME); 633MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
634MODULE_FIRMWARE(BCM43340_FIRMWARE_NAME);
635MODULE_FIRMWARE(BCM43340_NVRAM_NAME);
632MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME); 636MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
633MODULE_FIRMWARE(BCM4335_NVRAM_NAME); 637MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
634MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME); 638MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
@@ -660,6 +664,7 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
660 { BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) }, 664 { BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
661 { BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) }, 665 { BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
662 { BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) }, 666 { BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
667 { BRCM_CC_43340_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43340) },
663 { BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }, 668 { BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
664 { BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) }, 669 { BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
665 { BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }, 670 { BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
@@ -1008,12 +1013,12 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
1008 1013
1009 brcmf_dbg(SDIO, "Enter: request %s currently %s\n", 1014 brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
1010 (sleep ? "SLEEP" : "WAKE"), 1015 (sleep ? "SLEEP" : "WAKE"),
1011 (bus->sleeping ? "SLEEP" : "WAKE")); 1016 (bus->sdiodev->sleeping ? "SLEEP" : "WAKE"));
1012 1017
1013 /* If SR is enabled control bus state with KSO */ 1018 /* If SR is enabled control bus state with KSO */
1014 if (bus->sr_enabled) { 1019 if (bus->sr_enabled) {
1015 /* Done if we're already in the requested state */ 1020 /* Done if we're already in the requested state */
1016 if (sleep == bus->sleeping) 1021 if (sleep == bus->sdiodev->sleeping)
1017 goto end; 1022 goto end;
1018 1023
1019 /* Going to sleep */ 1024 /* Going to sleep */
@@ -1045,12 +1050,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
1045 bus->idlecount = 0; 1050 bus->idlecount = 0;
1046 err = brcmf_sdio_kso_control(bus, true); 1051 err = brcmf_sdio_kso_control(bus, true);
1047 } 1052 }
1048 if (!err) { 1053 if (err) {
1049 /* Change state */
1050 bus->sleeping = sleep;
1051 brcmf_dbg(SDIO, "new state %s\n",
1052 (sleep ? "SLEEP" : "WAKE"));
1053 } else {
1054 brcmf_err("error while changing bus sleep state %d\n", 1054 brcmf_err("error while changing bus sleep state %d\n",
1055 err); 1055 err);
1056 goto done; 1056 goto done;
@@ -1065,6 +1065,11 @@ end:
1065 } else { 1065 } else {
1066 brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok); 1066 brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
1067 } 1067 }
1068 bus->sdiodev->sleeping = sleep;
1069 if (sleep)
1070 wake_up(&bus->sdiodev->idle_wait);
1071 brcmf_dbg(SDIO, "new state %s\n",
1072 (sleep ? "SLEEP" : "WAKE"));
1068done: 1073done:
1069 brcmf_dbg(SDIO, "Exit: err=%d\n", err); 1074 brcmf_dbg(SDIO, "Exit: err=%d\n", err);
1070 return err; 1075 return err;
@@ -1904,7 +1909,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1904 bus->rxpending = true; 1909 bus->rxpending = true;
1905 1910
1906 for (rd->seq_num = bus->rx_seq, rxleft = maxframes; 1911 for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
1907 !bus->rxskip && rxleft && brcmf_bus_ready(bus->sdiodev->bus_if); 1912 !bus->rxskip && rxleft && bus->sdiodev->state == BRCMF_STATE_DATA;
1908 rd->seq_num++, rxleft--) { 1913 rd->seq_num++, rxleft--) {
1909 1914
1910 /* Handle glomming separately */ 1915 /* Handle glomming separately */
@@ -2371,8 +2376,6 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2371 /* Send frames until the limit or some other event */ 2376 /* Send frames until the limit or some other event */
2372 for (cnt = 0; (cnt < maxframes) && data_ok(bus);) { 2377 for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
2373 pkt_num = 1; 2378 pkt_num = 1;
2374 if (down_interruptible(&bus->tx_seq_lock))
2375 return cnt;
2376 if (bus->txglom) 2379 if (bus->txglom)
2377 pkt_num = min_t(u8, bus->tx_max - bus->tx_seq, 2380 pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
2378 bus->sdiodev->txglomsz); 2381 bus->sdiodev->txglomsz);
@@ -2388,13 +2391,10 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2388 __skb_queue_tail(&pktq, pkt); 2391 __skb_queue_tail(&pktq, pkt);
2389 } 2392 }
2390 spin_unlock_bh(&bus->txq_lock); 2393 spin_unlock_bh(&bus->txq_lock);
2391 if (i == 0) { 2394 if (i == 0)
2392 up(&bus->tx_seq_lock);
2393 break; 2395 break;
2394 }
2395 2396
2396 ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL); 2397 ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
2397 up(&bus->tx_seq_lock);
2398 2398
2399 cnt += i; 2399 cnt += i;
2400 2400
@@ -2415,7 +2415,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2415 } 2415 }
2416 2416
2417 /* Deflow-control stack if needed */ 2417 /* Deflow-control stack if needed */
2418 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) && 2418 if ((bus->sdiodev->state == BRCMF_STATE_DATA) &&
2419 bus->txoff && (pktq_len(&bus->txq) < TXLOW)) { 2419 bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
2420 bus->txoff = false; 2420 bus->txoff = false;
2421 brcmf_txflowblock(bus->sdiodev->dev, false); 2421 brcmf_txflowblock(bus->sdiodev->dev, false);
@@ -2503,7 +2503,7 @@ static void brcmf_sdio_bus_stop(struct device *dev)
2503 bus->watchdog_tsk = NULL; 2503 bus->watchdog_tsk = NULL;
2504 } 2504 }
2505 2505
2506 if (bus_if->state == BRCMF_BUS_DOWN) { 2506 if (sdiodev->state != BRCMF_STATE_NOMEDIUM) {
2507 sdio_claim_host(sdiodev->func[1]); 2507 sdio_claim_host(sdiodev->func[1]);
2508 2508
2509 /* Enable clock for device interrupts */ 2509 /* Enable clock for device interrupts */
@@ -2538,8 +2538,7 @@ static void brcmf_sdio_bus_stop(struct device *dev)
2538 brcmu_pktq_flush(&bus->txq, true, NULL, NULL); 2538 brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
2539 2539
2540 /* Clear any held glomming stuff */ 2540 /* Clear any held glomming stuff */
2541 if (bus->glomd) 2541 brcmu_pkt_buf_free_skb(bus->glomd);
2542 brcmu_pkt_buf_free_skb(bus->glomd);
2543 brcmf_sdio_free_glom(bus); 2542 brcmf_sdio_free_glom(bus);
2544 2543
2545 /* Clear rx control and wake any waiters */ 2544 /* Clear rx control and wake any waiters */
@@ -2604,6 +2603,21 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
2604 return ret; 2603 return ret;
2605} 2604}
2606 2605
2606static int brcmf_sdio_pm_resume_wait(struct brcmf_sdio_dev *sdiodev)
2607{
2608#ifdef CONFIG_PM_SLEEP
2609 int retry;
2610
2611 /* Wait for possible resume to complete */
2612 retry = 0;
2613 while ((atomic_read(&sdiodev->suspend)) && (retry++ != 50))
2614 msleep(20);
2615 if (atomic_read(&sdiodev->suspend))
2616 return -EIO;
2617#endif
2618 return 0;
2619}
2620
2607static void brcmf_sdio_dpc(struct brcmf_sdio *bus) 2621static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
2608{ 2622{
2609 u32 newstatus = 0; 2623 u32 newstatus = 0;
@@ -2614,6 +2628,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
2614 2628
2615 brcmf_dbg(TRACE, "Enter\n"); 2629 brcmf_dbg(TRACE, "Enter\n");
2616 2630
2631 if (brcmf_sdio_pm_resume_wait(bus->sdiodev))
2632 return;
2633
2617 sdio_claim_host(bus->sdiodev->func[1]); 2634 sdio_claim_host(bus->sdiodev->func[1]);
2618 2635
2619 /* If waiting for HTAVAIL, check status */ 2636 /* If waiting for HTAVAIL, check status */
@@ -2720,17 +2737,14 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
2720 brcmf_sdio_clrintr(bus); 2737 brcmf_sdio_clrintr(bus);
2721 2738
2722 if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) && 2739 if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
2723 (down_interruptible(&bus->tx_seq_lock) == 0)) { 2740 data_ok(bus)) {
2724 if (data_ok(bus)) { 2741 sdio_claim_host(bus->sdiodev->func[1]);
2725 sdio_claim_host(bus->sdiodev->func[1]); 2742 err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
2726 err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf, 2743 bus->ctrl_frame_len);
2727 bus->ctrl_frame_len); 2744 sdio_release_host(bus->sdiodev->func[1]);
2728 sdio_release_host(bus->sdiodev->func[1]); 2745 bus->ctrl_frame_err = err;
2729 2746 bus->ctrl_frame_stat = false;
2730 bus->ctrl_frame_stat = false; 2747 brcmf_sdio_wait_event_wakeup(bus);
2731 brcmf_sdio_wait_event_wakeup(bus);
2732 }
2733 up(&bus->tx_seq_lock);
2734 } 2748 }
2735 /* Send queued frames (limit 1 if rx may still be pending) */ 2749 /* Send queued frames (limit 1 if rx may still be pending) */
2736 if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) && 2750 if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
@@ -2741,7 +2755,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
2741 brcmf_sdio_sendfromq(bus, framecnt); 2755 brcmf_sdio_sendfromq(bus, framecnt);
2742 } 2756 }
2743 2757
2744 if (!brcmf_bus_ready(bus->sdiodev->bus_if) || (err != 0)) { 2758 if ((bus->sdiodev->state != BRCMF_STATE_DATA) || (err != 0)) {
2745 brcmf_err("failed backplane access over SDIO, halting operation\n"); 2759 brcmf_err("failed backplane access over SDIO, halting operation\n");
2746 atomic_set(&bus->intstatus, 0); 2760 atomic_set(&bus->intstatus, 0);
2747 } else if (atomic_read(&bus->intstatus) || 2761 } else if (atomic_read(&bus->intstatus) ||
@@ -2942,43 +2956,30 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2942 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 2956 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2943 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 2957 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
2944 struct brcmf_sdio *bus = sdiodev->bus; 2958 struct brcmf_sdio *bus = sdiodev->bus;
2945 int ret = -1; 2959 int ret;
2946 2960
2947 brcmf_dbg(TRACE, "Enter\n"); 2961 brcmf_dbg(TRACE, "Enter\n");
2948 2962
2949 if (down_interruptible(&bus->tx_seq_lock)) 2963 /* Send from dpc */
2950 return -EINTR; 2964 bus->ctrl_frame_buf = msg;
2951 2965 bus->ctrl_frame_len = msglen;
2952 if (!data_ok(bus)) { 2966 bus->ctrl_frame_stat = true;
2953 brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n", 2967 if (atomic_read(&bus->dpc_tskcnt) == 0) {
2954 bus->tx_max, bus->tx_seq); 2968 atomic_inc(&bus->dpc_tskcnt);
2955 up(&bus->tx_seq_lock); 2969 queue_work(bus->brcmf_wq, &bus->datawork);
2956 /* Send from dpc */
2957 bus->ctrl_frame_buf = msg;
2958 bus->ctrl_frame_len = msglen;
2959 bus->ctrl_frame_stat = true;
2960
2961 wait_event_interruptible_timeout(bus->ctrl_wait,
2962 !bus->ctrl_frame_stat,
2963 msecs_to_jiffies(2000));
2964
2965 if (!bus->ctrl_frame_stat) {
2966 brcmf_dbg(SDIO, "ctrl_frame_stat == false\n");
2967 ret = 0;
2968 } else {
2969 brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
2970 bus->ctrl_frame_stat = false;
2971 if (down_interruptible(&bus->tx_seq_lock))
2972 return -EINTR;
2973 ret = -1;
2974 }
2975 } 2970 }
2976 if (ret == -1) { 2971
2977 sdio_claim_host(bus->sdiodev->func[1]); 2972 wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
2978 brcmf_sdio_bus_sleep(bus, false, false); 2973 msecs_to_jiffies(CTL_DONE_TIMEOUT));
2979 ret = brcmf_sdio_tx_ctrlframe(bus, msg, msglen); 2974
2980 sdio_release_host(bus->sdiodev->func[1]); 2975 if (!bus->ctrl_frame_stat) {
2981 up(&bus->tx_seq_lock); 2976 brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
2977 bus->ctrl_frame_err);
2978 ret = bus->ctrl_frame_err;
2979 } else {
2980 brcmf_dbg(SDIO, "ctrl_frame timeout\n");
2981 bus->ctrl_frame_stat = false;
2982 ret = -ETIMEDOUT;
2982 } 2983 }
2983 2984
2984 if (ret) 2985 if (ret)
@@ -2986,7 +2987,7 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2986 else 2987 else
2987 bus->sdcnt.tx_ctlpkts++; 2988 bus->sdcnt.tx_ctlpkts++;
2988 2989
2989 return ret ? -EIO : 0; 2990 return ret;
2990} 2991}
2991 2992
2992#ifdef DEBUG 2993#ifdef DEBUG
@@ -3409,8 +3410,8 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
3409 goto err; 3410 goto err;
3410 } 3411 }
3411 3412
3412 /* Allow HT Clock now that the ARM is running. */ 3413 /* Allow full data communication using DPC from now on. */
3413 brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_LOAD); 3414 bus->sdiodev->state = BRCMF_STATE_DATA;
3414 bcmerror = 0; 3415 bcmerror = 0;
3415 3416
3416err: 3417err:
@@ -3556,7 +3557,7 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
3556 return; 3557 return;
3557 } 3558 }
3558 3559
3559 if (!brcmf_bus_ready(bus->sdiodev->bus_if)) { 3560 if (bus->sdiodev->state != BRCMF_STATE_DATA) {
3560 brcmf_err("bus is down. we have nothing to do\n"); 3561 brcmf_err("bus is down. we have nothing to do\n");
3561 return; 3562 return;
3562 } 3563 }
@@ -3579,10 +3580,6 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
3579 3580
3580static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) 3581static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
3581{ 3582{
3582#ifdef DEBUG
3583 struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
3584#endif /* DEBUG */
3585
3586 brcmf_dbg(TIMER, "Enter\n"); 3583 brcmf_dbg(TIMER, "Enter\n");
3587 3584
3588 /* Poll period: check device if appropriate. */ 3585 /* Poll period: check device if appropriate. */
@@ -3626,7 +3623,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
3626 } 3623 }
3627#ifdef DEBUG 3624#ifdef DEBUG
3628 /* Poll for console output periodically */ 3625 /* Poll for console output periodically */
3629 if (bus_if && bus_if->state == BRCMF_BUS_DATA && 3626 if (bus->sdiodev->state == BRCMF_STATE_DATA &&
3630 bus->console_interval != 0) { 3627 bus->console_interval != 0) {
3631 bus->console.count += BRCMF_WD_POLL_MS; 3628 bus->console.count += BRCMF_WD_POLL_MS;
3632 if (bus->console.count >= bus->console_interval) { 3629 if (bus->console.count >= bus->console_interval) {
@@ -3811,7 +3808,7 @@ static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
3811 u32 val, rev; 3808 u32 val, rev;
3812 3809
3813 val = brcmf_sdiod_regrl(sdiodev, addr, NULL); 3810 val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
3814 if (sdiodev->func[0]->device == BRCM_SDIO_4335_4339_DEVICE_ID && 3811 if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
3815 addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) { 3812 addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
3816 rev = (val & CID_REV_MASK) >> CID_REV_SHIFT; 3813 rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
3817 if (rev >= 2) { 3814 if (rev >= 2) {
@@ -3867,11 +3864,6 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
3867 goto fail; 3864 goto fail;
3868 } 3865 }
3869 3866
3870 /* SDIO register access works so moving
3871 * state from UNKNOWN to DOWN.
3872 */
3873 brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
3874
3875 bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops); 3867 bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
3876 if (IS_ERR(bus->ci)) { 3868 if (IS_ERR(bus->ci)) {
3877 brcmf_err("brcmf_chip_attach failed!\n"); 3869 brcmf_err("brcmf_chip_attach failed!\n");
@@ -4005,18 +3997,16 @@ static void brcmf_sdio_firmware_callback(struct device *dev,
4005 3997
4006 brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev)); 3998 brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
4007 3999
4008 /* try to download image and nvram to the dongle */
4009 if (bus_if->state == BRCMF_BUS_DOWN) {
4010 bus->alp_only = true;
4011 err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
4012 if (err)
4013 goto fail;
4014 bus->alp_only = false;
4015 }
4016
4017 if (!bus_if->drvr) 4000 if (!bus_if->drvr)
4018 return; 4001 return;
4019 4002
4003 /* try to download image and nvram to the dongle */
4004 bus->alp_only = true;
4005 err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
4006 if (err)
4007 goto fail;
4008 bus->alp_only = false;
4009
4020 /* Start the watchdog timer */ 4010 /* Start the watchdog timer */
4021 bus->sdcnt.tickcnt = 0; 4011 bus->sdcnt.tickcnt = 0;
4022 brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS); 4012 brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
@@ -4142,7 +4132,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
4142 4132
4143 spin_lock_init(&bus->rxctl_lock); 4133 spin_lock_init(&bus->rxctl_lock);
4144 spin_lock_init(&bus->txq_lock); 4134 spin_lock_init(&bus->txq_lock);
4145 sema_init(&bus->tx_seq_lock, 1);
4146 init_waitqueue_head(&bus->ctrl_wait); 4135 init_waitqueue_head(&bus->ctrl_wait);
4147 init_waitqueue_head(&bus->dcmd_resp_wait); 4136 init_waitqueue_head(&bus->dcmd_resp_wait);
4148 4137
@@ -4213,7 +4202,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
4213 bus->idleclock = BRCMF_IDLE_ACTIVE; 4202 bus->idleclock = BRCMF_IDLE_ACTIVE;
4214 4203
4215 /* SR state */ 4204 /* SR state */
4216 bus->sleeping = false;
4217 bus->sr_enabled = false; 4205 bus->sr_enabled = false;
4218 4206
4219 brcmf_sdio_debugfs_create(bus); 4207 brcmf_sdio_debugfs_create(bus);
@@ -4254,7 +4242,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
4254 destroy_workqueue(bus->brcmf_wq); 4242 destroy_workqueue(bus->brcmf_wq);
4255 4243
4256 if (bus->ci) { 4244 if (bus->ci) {
4257 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) { 4245 if (bus->sdiodev->state != BRCMF_STATE_NOMEDIUM) {
4258 sdio_claim_host(bus->sdiodev->func[1]); 4246 sdio_claim_host(bus->sdiodev->func[1]);
4259 brcmf_sdio_clkctl(bus, CLK_AVAIL, false); 4247 brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
4260 /* Leave the device in state where it is 4248 /* Leave the device in state where it is
@@ -4289,7 +4277,7 @@ void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
4289 } 4277 }
4290 4278
4291 /* don't start the wd until fw is loaded */ 4279 /* don't start the wd until fw is loaded */
4292 if (bus->sdiodev->bus_if->state != BRCMF_BUS_DATA) 4280 if (bus->sdiodev->state != BRCMF_STATE_DATA)
4293 return; 4281 return;
4294 4282
4295 if (wdtick) { 4283 if (wdtick) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
index 8eb42620129c..ec2586a8425c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.h
@@ -155,6 +155,13 @@
155/* watchdog polling interval in ms */ 155/* watchdog polling interval in ms */
156#define BRCMF_WD_POLL_MS 10 156#define BRCMF_WD_POLL_MS 10
157 157
158/* The state of the bus */
159enum brcmf_sdio_state {
160 BRCMF_STATE_DOWN, /* Device available, still initialising */
161 BRCMF_STATE_DATA, /* Ready for data transfers, DPC enabled */
162 BRCMF_STATE_NOMEDIUM /* No medium access to dongle possible */
163};
164
158struct brcmf_sdreg { 165struct brcmf_sdreg {
159 int func; 166 int func;
160 int offset; 167 int offset;
@@ -169,8 +176,8 @@ struct brcmf_sdio_dev {
169 u32 sbwad; /* Save backplane window address */ 176 u32 sbwad; /* Save backplane window address */
170 struct brcmf_sdio *bus; 177 struct brcmf_sdio *bus;
171 atomic_t suspend; /* suspend flag */ 178 atomic_t suspend; /* suspend flag */
172 wait_queue_head_t request_word_wait; 179 bool sleeping;
173 wait_queue_head_t request_buffer_wait; 180 wait_queue_head_t idle_wait;
174 struct device *dev; 181 struct device *dev;
175 struct brcmf_bus *bus_if; 182 struct brcmf_bus *bus_if;
176 struct brcmfmac_sdio_platform_data *pdata; 183 struct brcmfmac_sdio_platform_data *pdata;
@@ -187,6 +194,7 @@ struct brcmf_sdio_dev {
187 char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN]; 194 char fw_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
188 char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN]; 195 char nvram_name[BRCMF_FW_PATH_LEN + BRCMF_FW_NAME_LEN];
189 bool wowl_enabled; 196 bool wowl_enabled;
197 enum brcmf_sdio_state state;
190}; 198};
191 199
192/* sdio core registers */ 200/* sdio core registers */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 4572defc280f..5df6aa72cc2d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -421,7 +421,7 @@ fail:
421 brcmf_err("fail!\n"); 421 brcmf_err("fail!\n");
422 while (!list_empty(q)) { 422 while (!list_empty(q)) {
423 req = list_entry(q->next, struct brcmf_usbreq, list); 423 req = list_entry(q->next, struct brcmf_usbreq, list);
424 if (req && req->urb) 424 if (req)
425 usb_free_urb(req->urb); 425 usb_free_urb(req->urb);
426 list_del(q->next); 426 list_del(q->next);
427 } 427 }
@@ -576,7 +576,7 @@ brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
576 brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DOWN); 576 brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DOWN);
577 } else if (state == BRCMFMAC_USB_STATE_UP) { 577 } else if (state == BRCMFMAC_USB_STATE_UP) {
578 brcmf_dbg(USB, "DBUS is up\n"); 578 brcmf_dbg(USB, "DBUS is up\n");
579 brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_DATA); 579 brcmf_bus_change_state(bcmf_bus, BRCMF_BUS_UP);
580 } else { 580 } else {
581 brcmf_dbg(USB, "DBUS current state=%d\n", state); 581 brcmf_dbg(USB, "DBUS current state=%d\n", state);
582 } 582 }
@@ -1263,6 +1263,8 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
1263 ret = brcmf_usb_bus_setup(devinfo); 1263 ret = brcmf_usb_bus_setup(devinfo);
1264 if (ret) 1264 if (ret)
1265 goto fail; 1265 goto fail;
1266 /* we are done */
1267 return 0;
1266 } 1268 }
1267 bus->chip = bus_pub->devid; 1269 bus->chip = bus_pub->devid;
1268 bus->chiprev = bus_pub->chiprev; 1270 bus->chiprev = bus_pub->chiprev;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
index c9a8b9360ab1..7a1fbb2e3a71 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/debug.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
@@ -78,7 +78,7 @@ int brcms_debugfs_hardware_read(struct seq_file *s, void *data)
78 struct brcms_hardware *hw = drvr->wlc->hw; 78 struct brcms_hardware *hw = drvr->wlc->hw;
79 struct bcma_device *core = hw->d11core; 79 struct bcma_device *core = hw->d11core;
80 struct bcma_bus *bus = core->bus; 80 struct bcma_bus *bus = core->bus;
81 char boardrev[10]; 81 char boardrev[BRCMU_BOARDREV_LEN];
82 82
83 seq_printf(s, "chipnum 0x%x\n" 83 seq_printf(s, "chipnum 0x%x\n"
84 "chiprev 0x%x\n" 84 "chiprev 0x%x\n"
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index 906e89ddf319..0543607002fd 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -267,15 +267,43 @@ char *brcmu_boardrev_str(u32 brev, char *buf)
267 char c; 267 char c;
268 268
269 if (brev < 0x100) { 269 if (brev < 0x100) {
270 snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf); 270 snprintf(buf, BRCMU_BOARDREV_LEN, "%d.%d",
271 (brev & 0xf0) >> 4, brev & 0xf);
271 } else { 272 } else {
272 c = (brev & 0xf000) == 0x1000 ? 'P' : 'A'; 273 c = (brev & 0xf000) == 0x1000 ? 'P' : 'A';
273 snprintf(buf, 8, "%c%03x", c, brev & 0xfff); 274 snprintf(buf, BRCMU_BOARDREV_LEN, "%c%03x", c, brev & 0xfff);
274 } 275 }
275 return buf; 276 return buf;
276} 277}
277EXPORT_SYMBOL(brcmu_boardrev_str); 278EXPORT_SYMBOL(brcmu_boardrev_str);
278 279
280char *brcmu_dotrev_str(u32 dotrev, char *buf)
281{
282 u8 dotval[4];
283
284 if (!dotrev) {
285 snprintf(buf, BRCMU_DOTREV_LEN, "unknown");
286 return buf;
287 }
288 dotval[0] = (dotrev >> 24) & 0xFF;
289 dotval[1] = (dotrev >> 16) & 0xFF;
290 dotval[2] = (dotrev >> 8) & 0xFF;
291 dotval[3] = dotrev & 0xFF;
292
293 if (dotval[3])
294 snprintf(buf, BRCMU_DOTREV_LEN, "%d.%d.%d.%d", dotval[0],
295 dotval[1], dotval[2], dotval[3]);
296 else if (dotval[2])
297 snprintf(buf, BRCMU_DOTREV_LEN, "%d.%d.%d", dotval[0],
298 dotval[1], dotval[2]);
299 else
300 snprintf(buf, BRCMU_DOTREV_LEN, "%d.%d", dotval[0],
301 dotval[1]);
302
303 return buf;
304}
305EXPORT_SYMBOL(brcmu_dotrev_str);
306
279#if defined(DEBUG) 307#if defined(DEBUG)
280/* pretty hex print a pkt buffer chain */ 308/* pretty hex print a pkt buffer chain */
281void brcmu_prpkt(const char *msg, struct sk_buff *p0) 309void brcmu_prpkt(const char *msg, struct sk_buff *p0)
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 6996fcc144cf..2124a17d0bfd 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -22,7 +22,6 @@
22 22
23#define BRCM_USB_VENDOR_ID_BROADCOM 0x0a5c 23#define BRCM_USB_VENDOR_ID_BROADCOM 0x0a5c
24#define BRCM_PCIE_VENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM 24#define BRCM_PCIE_VENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
25#define BRCM_SDIO_VENDOR_ID_BROADCOM SDIO_VENDOR_ID_BROADCOM
26 25
27/* Chipcommon Core Chip IDs */ 26/* Chipcommon Core Chip IDs */
28#define BRCM_CC_43143_CHIP_ID 43143 27#define BRCM_CC_43143_CHIP_ID 43143
@@ -34,6 +33,7 @@
34#define BRCM_CC_4329_CHIP_ID 0x4329 33#define BRCM_CC_4329_CHIP_ID 0x4329
35#define BRCM_CC_4330_CHIP_ID 0x4330 34#define BRCM_CC_4330_CHIP_ID 0x4330
36#define BRCM_CC_4334_CHIP_ID 0x4334 35#define BRCM_CC_4334_CHIP_ID 0x4334
36#define BRCM_CC_43340_CHIP_ID 43340
37#define BRCM_CC_43362_CHIP_ID 43362 37#define BRCM_CC_43362_CHIP_ID 43362
38#define BRCM_CC_4335_CHIP_ID 0x4335 38#define BRCM_CC_4335_CHIP_ID 0x4335
39#define BRCM_CC_4339_CHIP_ID 0x4339 39#define BRCM_CC_4339_CHIP_ID 0x4339
@@ -45,16 +45,6 @@
45#define BRCM_CC_43570_CHIP_ID 43570 45#define BRCM_CC_43570_CHIP_ID 43570
46#define BRCM_CC_43602_CHIP_ID 43602 46#define BRCM_CC_43602_CHIP_ID 43602
47 47
48/* SDIO Device IDs */
49#define BRCM_SDIO_43143_DEVICE_ID BRCM_CC_43143_CHIP_ID
50#define BRCM_SDIO_43241_DEVICE_ID BRCM_CC_43241_CHIP_ID
51#define BRCM_SDIO_4329_DEVICE_ID BRCM_CC_4329_CHIP_ID
52#define BRCM_SDIO_4330_DEVICE_ID BRCM_CC_4330_CHIP_ID
53#define BRCM_SDIO_4334_DEVICE_ID BRCM_CC_4334_CHIP_ID
54#define BRCM_SDIO_43362_DEVICE_ID BRCM_CC_43362_CHIP_ID
55#define BRCM_SDIO_4335_4339_DEVICE_ID BRCM_CC_4335_CHIP_ID
56#define BRCM_SDIO_4354_DEVICE_ID BRCM_CC_4354_CHIP_ID
57
58/* USB Device IDs */ 48/* USB Device IDs */
59#define BRCM_USB_43143_DEVICE_ID 0xbd1e 49#define BRCM_USB_43143_DEVICE_ID 0xbd1e
60#define BRCM_USB_43236_DEVICE_ID 0xbd17 50#define BRCM_USB_43236_DEVICE_ID 0xbd17
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index a043e29f07e2..41969527b459 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -218,6 +218,10 @@ void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...)
218} 218}
219#endif 219#endif
220 220
221#define BRCMU_BOARDREV_LEN 8
222#define BRCMU_DOTREV_LEN 16
223
221char *brcmu_boardrev_str(u32 brev, char *buf); 224char *brcmu_boardrev_str(u32 brev, char *buf);
225char *brcmu_dotrev_str(u32 dotrev, char *buf);
222 226
223#endif /* _BRCMU_UTILS_H_ */ 227#endif /* _BRCMU_UTILS_H_ */
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
index 6f1b9aace8b3..30e7646d04af 100644
--- a/drivers/net/wireless/cw1200/fwio.c
+++ b/drivers/net/wireless/cw1200/fwio.c
@@ -66,25 +66,31 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
66 do { \ 66 do { \
67 ret = cw1200_apb_write_32(priv, CW1200_APB(reg), (val)); \ 67 ret = cw1200_apb_write_32(priv, CW1200_APB(reg), (val)); \
68 if (ret < 0) \ 68 if (ret < 0) \
69 goto error; \ 69 goto exit; \
70 } while (0)
71#define APB_WRITE2(reg, val) \
72 do { \
73 ret = cw1200_apb_write_32(priv, CW1200_APB(reg), (val)); \
74 if (ret < 0) \
75 goto free_buffer; \
70 } while (0) 76 } while (0)
71#define APB_READ(reg, val) \ 77#define APB_READ(reg, val) \
72 do { \ 78 do { \
73 ret = cw1200_apb_read_32(priv, CW1200_APB(reg), &(val)); \ 79 ret = cw1200_apb_read_32(priv, CW1200_APB(reg), &(val)); \
74 if (ret < 0) \ 80 if (ret < 0) \
75 goto error; \ 81 goto free_buffer; \
76 } while (0) 82 } while (0)
77#define REG_WRITE(reg, val) \ 83#define REG_WRITE(reg, val) \
78 do { \ 84 do { \
79 ret = cw1200_reg_write_32(priv, (reg), (val)); \ 85 ret = cw1200_reg_write_32(priv, (reg), (val)); \
80 if (ret < 0) \ 86 if (ret < 0) \
81 goto error; \ 87 goto exit; \
82 } while (0) 88 } while (0)
83#define REG_READ(reg, val) \ 89#define REG_READ(reg, val) \
84 do { \ 90 do { \
85 ret = cw1200_reg_read_32(priv, (reg), &(val)); \ 91 ret = cw1200_reg_read_32(priv, (reg), &(val)); \
86 if (ret < 0) \ 92 if (ret < 0) \
87 goto error; \ 93 goto exit; \
88 } while (0) 94 } while (0)
89 95
90 switch (priv->hw_revision) { 96 switch (priv->hw_revision) {
@@ -142,14 +148,14 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
142 ret = request_firmware(&firmware, fw_path, priv->pdev); 148 ret = request_firmware(&firmware, fw_path, priv->pdev);
143 if (ret) { 149 if (ret) {
144 pr_err("Can't load firmware file %s.\n", fw_path); 150 pr_err("Can't load firmware file %s.\n", fw_path);
145 goto error; 151 goto exit;
146 } 152 }
147 153
148 buf = kmalloc(DOWNLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA); 154 buf = kmalloc(DOWNLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA);
149 if (!buf) { 155 if (!buf) {
150 pr_err("Can't allocate firmware load buffer.\n"); 156 pr_err("Can't allocate firmware load buffer.\n");
151 ret = -ENOMEM; 157 ret = -ENOMEM;
152 goto error; 158 goto firmware_release;
153 } 159 }
154 160
155 /* Check if the bootloader is ready */ 161 /* Check if the bootloader is ready */
@@ -163,7 +169,7 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
163 if (val32 != DOWNLOAD_I_AM_HERE) { 169 if (val32 != DOWNLOAD_I_AM_HERE) {
164 pr_err("Bootloader is not ready.\n"); 170 pr_err("Bootloader is not ready.\n");
165 ret = -ETIMEDOUT; 171 ret = -ETIMEDOUT;
166 goto error; 172 goto free_buffer;
167 } 173 }
168 174
169 /* Calculcate number of download blocks */ 175 /* Calculcate number of download blocks */
@@ -171,7 +177,7 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
171 177
172 /* Updating the length in Download Ctrl Area */ 178 /* Updating the length in Download Ctrl Area */
173 val32 = firmware->size; /* Explicit cast from size_t to u32 */ 179 val32 = firmware->size; /* Explicit cast from size_t to u32 */
174 APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, val32); 180 APB_WRITE2(DOWNLOAD_IMAGE_SIZE_REG, val32);
175 181
176 /* Firmware downloading loop */ 182 /* Firmware downloading loop */
177 for (block = 0; block < num_blocks; block++) { 183 for (block = 0; block < num_blocks; block++) {
@@ -183,7 +189,7 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
183 if (val32 != DOWNLOAD_PENDING) { 189 if (val32 != DOWNLOAD_PENDING) {
184 pr_err("Bootloader reported error %d.\n", val32); 190 pr_err("Bootloader reported error %d.\n", val32);
185 ret = -EIO; 191 ret = -EIO;
186 goto error; 192 goto free_buffer;
187 } 193 }
188 194
189 /* loop until put - get <= 24K */ 195 /* loop until put - get <= 24K */
@@ -198,7 +204,7 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
198 if ((put - get) > (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE)) { 204 if ((put - get) > (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE)) {
199 pr_err("Timeout waiting for FIFO.\n"); 205 pr_err("Timeout waiting for FIFO.\n");
200 ret = -ETIMEDOUT; 206 ret = -ETIMEDOUT;
201 goto error; 207 goto free_buffer;
202 } 208 }
203 209
204 /* calculate the block size */ 210 /* calculate the block size */
@@ -220,12 +226,12 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
220 if (ret < 0) { 226 if (ret < 0) {
221 pr_err("Can't write firmware block @ %d!\n", 227 pr_err("Can't write firmware block @ %d!\n",
222 put & (DOWNLOAD_FIFO_SIZE - 1)); 228 put & (DOWNLOAD_FIFO_SIZE - 1));
223 goto error; 229 goto free_buffer;
224 } 230 }
225 231
226 /* update the put register */ 232 /* update the put register */
227 put += block_size; 233 put += block_size;
228 APB_WRITE(DOWNLOAD_PUT_REG, put); 234 APB_WRITE2(DOWNLOAD_PUT_REG, put);
229 } /* End of firmware download loop */ 235 } /* End of firmware download loop */
230 236
231 /* Wait for the download completion */ 237 /* Wait for the download completion */
@@ -238,19 +244,21 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
238 if (val32 != DOWNLOAD_SUCCESS) { 244 if (val32 != DOWNLOAD_SUCCESS) {
239 pr_err("Wait for download completion failed: 0x%.8X\n", val32); 245 pr_err("Wait for download completion failed: 0x%.8X\n", val32);
240 ret = -ETIMEDOUT; 246 ret = -ETIMEDOUT;
241 goto error; 247 goto free_buffer;
242 } else { 248 } else {
243 pr_info("Firmware download completed.\n"); 249 pr_info("Firmware download completed.\n");
244 ret = 0; 250 ret = 0;
245 } 251 }
246 252
247error: 253free_buffer:
248 kfree(buf); 254 kfree(buf);
249 if (firmware) 255firmware_release:
250 release_firmware(firmware); 256 release_firmware(firmware);
257exit:
251 return ret; 258 return ret;
252 259
253#undef APB_WRITE 260#undef APB_WRITE
261#undef APB_WRITE2
254#undef APB_READ 262#undef APB_READ
255#undef REG_WRITE 263#undef REG_WRITE
256#undef REG_READ 264#undef REG_READ
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c
index 3e78cc3ccb78..3689dbbd10bd 100644
--- a/drivers/net/wireless/cw1200/main.c
+++ b/drivers/net/wireless/cw1200/main.c
@@ -282,7 +282,6 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
282 IEEE80211_HW_SUPPORTS_PS | 282 IEEE80211_HW_SUPPORTS_PS |
283 IEEE80211_HW_SUPPORTS_DYNAMIC_PS | 283 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
284 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 284 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
285 IEEE80211_HW_SUPPORTS_UAPSD |
286 IEEE80211_HW_CONNECTION_MONITOR | 285 IEEE80211_HW_CONNECTION_MONITOR |
287 IEEE80211_HW_AMPDU_AGGREGATION | 286 IEEE80211_HW_AMPDU_AGGREGATION |
288 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW | 287 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
@@ -374,9 +373,8 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
374 INIT_WORK(&priv->update_filtering_work, cw1200_update_filtering_work); 373 INIT_WORK(&priv->update_filtering_work, cw1200_update_filtering_work);
375 INIT_WORK(&priv->set_beacon_wakeup_period_work, 374 INIT_WORK(&priv->set_beacon_wakeup_period_work,
376 cw1200_set_beacon_wakeup_period_work); 375 cw1200_set_beacon_wakeup_period_work);
377 init_timer(&priv->mcast_timeout); 376 setup_timer(&priv->mcast_timeout, cw1200_mcast_timeout,
378 priv->mcast_timeout.data = (unsigned long)priv; 377 (unsigned long)priv);
379 priv->mcast_timeout.function = cw1200_mcast_timeout;
380 378
381 if (cw1200_queue_stats_init(&priv->tx_queue_stats, 379 if (cw1200_queue_stats_init(&priv->tx_queue_stats,
382 CW1200_LINK_ID_MAX, 380 CW1200_LINK_ID_MAX,
diff --git a/drivers/net/wireless/cw1200/pm.c b/drivers/net/wireless/cw1200/pm.c
index 6907c8fd4578..d2202ae92bdd 100644
--- a/drivers/net/wireless/cw1200/pm.c
+++ b/drivers/net/wireless/cw1200/pm.c
@@ -101,9 +101,8 @@ int cw1200_pm_init(struct cw1200_pm_state *pm,
101{ 101{
102 spin_lock_init(&pm->lock); 102 spin_lock_init(&pm->lock);
103 103
104 init_timer(&pm->stay_awake); 104 setup_timer(&pm->stay_awake, cw1200_pm_stay_awake_tmo,
105 pm->stay_awake.data = (unsigned long)pm; 105 (unsigned long)pm);
106 pm->stay_awake.function = cw1200_pm_stay_awake_tmo;
107 106
108 return 0; 107 return 0;
109} 108}
diff --git a/drivers/net/wireless/cw1200/queue.c b/drivers/net/wireless/cw1200/queue.c
index 9c3925f58d79..0ba5ef9b3e7b 100644
--- a/drivers/net/wireless/cw1200/queue.c
+++ b/drivers/net/wireless/cw1200/queue.c
@@ -179,9 +179,7 @@ int cw1200_queue_init(struct cw1200_queue *queue,
179 INIT_LIST_HEAD(&queue->pending); 179 INIT_LIST_HEAD(&queue->pending);
180 INIT_LIST_HEAD(&queue->free_pool); 180 INIT_LIST_HEAD(&queue->free_pool);
181 spin_lock_init(&queue->lock); 181 spin_lock_init(&queue->lock);
182 init_timer(&queue->gc); 182 setup_timer(&queue->gc, cw1200_queue_gc, (unsigned long)queue);
183 queue->gc.data = (unsigned long)queue;
184 queue->gc.function = cw1200_queue_gc;
185 183
186 queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity, 184 queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity,
187 GFP_KERNEL); 185 GFP_KERNEL);
diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/cw1200/scan.c
index f2e276faca70..bff81b8d4164 100644
--- a/drivers/net/wireless/cw1200/scan.c
+++ b/drivers/net/wireless/cw1200/scan.c
@@ -39,9 +39,9 @@ static int cw1200_scan_start(struct cw1200_common *priv, struct wsm_scan *scan)
39 cancel_delayed_work_sync(&priv->clear_recent_scan_work); 39 cancel_delayed_work_sync(&priv->clear_recent_scan_work);
40 atomic_set(&priv->scan.in_progress, 1); 40 atomic_set(&priv->scan.in_progress, 1);
41 atomic_set(&priv->recent_scan, 1); 41 atomic_set(&priv->recent_scan, 1);
42 cw1200_pm_stay_awake(&priv->pm_state, tmo * HZ / 1000); 42 cw1200_pm_stay_awake(&priv->pm_state, msecs_to_jiffies(tmo));
43 queue_delayed_work(priv->workqueue, &priv->scan.timeout, 43 queue_delayed_work(priv->workqueue, &priv->scan.timeout,
44 tmo * HZ / 1000); 44 msecs_to_jiffies(tmo));
45 ret = wsm_scan(priv, scan); 45 ret = wsm_scan(priv, scan);
46 if (ret) { 46 if (ret) {
47 atomic_set(&priv->scan.in_progress, 0); 47 atomic_set(&priv->scan.in_progress, 0);
@@ -386,8 +386,8 @@ void cw1200_probe_work(struct work_struct *work)
386 if (down_trylock(&priv->scan.lock)) { 386 if (down_trylock(&priv->scan.lock)) {
387 /* Scan is already in progress. Requeue self. */ 387 /* Scan is already in progress. Requeue self. */
388 schedule(); 388 schedule();
389 queue_delayed_work(priv->workqueue, 389 queue_delayed_work(priv->workqueue, &priv->scan.probe_work,
390 &priv->scan.probe_work, HZ / 10); 390 msecs_to_jiffies(100));
391 mutex_unlock(&priv->conf_mutex); 391 mutex_unlock(&priv->conf_mutex);
392 return; 392 return;
393 } 393 }
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 5b84664db13b..4a47c7f8a246 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -213,6 +213,7 @@ int cw1200_add_interface(struct ieee80211_hw *dev,
213 /* __le32 auto_calibration_mode = __cpu_to_le32(1); */ 213 /* __le32 auto_calibration_mode = __cpu_to_le32(1); */
214 214
215 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | 215 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
216 IEEE80211_VIF_SUPPORTS_UAPSD |
216 IEEE80211_VIF_SUPPORTS_CQM_RSSI; 217 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
217 218
218 mutex_lock(&priv->conf_mutex); 219 mutex_lock(&priv->conf_mutex);
@@ -708,7 +709,8 @@ int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
708 if (sta) 709 if (sta)
709 peer_addr = sta->addr; 710 peer_addr = sta->addr;
710 711
711 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 712 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE |
713 IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
712 714
713 switch (key->cipher) { 715 switch (key->cipher) {
714 case WLAN_CIPHER_SUITE_WEP40: 716 case WLAN_CIPHER_SUITE_WEP40:
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 596525528f50..fd8d83dd4f62 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -145,7 +145,7 @@ static void ap_free_sta(struct ap_data *ap, struct sta_info *sta)
145 if (sta->aid > 0) 145 if (sta->aid > 0)
146 ap->sta_aid[sta->aid - 1] = NULL; 146 ap->sta_aid[sta->aid - 1] = NULL;
147 147
148 if (!sta->ap && sta->u.sta.challenge) 148 if (!sta->ap)
149 kfree(sta->u.sta.challenge); 149 kfree(sta->u.sta.challenge);
150 del_timer_sync(&sta->timer); 150 del_timer_sync(&sta->timer);
151#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ 151#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index dc1d20cf64ee..e5665804d986 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3429,9 +3429,7 @@ il3945_setup_deferred_work(struct il_priv *il)
3429 3429
3430 il3945_hw_setup_deferred_work(il); 3430 il3945_hw_setup_deferred_work(il);
3431 3431
3432 init_timer(&il->watchdog); 3432 setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il);
3433 il->watchdog.data = (unsigned long)il;
3434 il->watchdog.function = il_bg_watchdog;
3435 3433
3436 tasklet_init(&il->irq_tasklet, 3434 tasklet_init(&il->irq_tasklet,
3437 (void (*)(unsigned long))il3945_irq_tasklet, 3435 (void (*)(unsigned long))il3945_irq_tasklet,
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 2748fde4b90c..976f65fe9c38 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -6247,13 +6247,10 @@ il4965_setup_deferred_work(struct il_priv *il)
6247 6247
6248 INIT_WORK(&il->txpower_work, il4965_bg_txpower_work); 6248 INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);
6249 6249
6250 init_timer(&il->stats_periodic); 6250 setup_timer(&il->stats_periodic, il4965_bg_stats_periodic,
6251 il->stats_periodic.data = (unsigned long)il; 6251 (unsigned long)il);
6252 il->stats_periodic.function = il4965_bg_stats_periodic;
6253 6252
6254 init_timer(&il->watchdog); 6253 setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il);
6255 il->watchdog.data = (unsigned long)il;
6256 il->watchdog.function = il_bg_watchdog;
6257 6254
6258 tasklet_init(&il->irq_tasklet, 6255 tasklet_init(&il->irq_tasklet,
6259 (void (*)(unsigned long))il4965_irq_tasklet, 6256 (void (*)(unsigned long))il4965_irq_tasklet,
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 0b7f46f0b079..c4d6dd7402d9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -64,22 +64,8 @@
64 * 64 *
65 ******************************************************************************/ 65 ******************************************************************************/
66 66
67/*
68 * module name, copyright, version, etc.
69 */
70#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux" 67#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
71
72#ifdef CONFIG_IWLWIFI_DEBUG
73#define VD "d"
74#else
75#define VD
76#endif
77
78#define DRV_VERSION IWLWIFI_VERSION VD
79
80
81MODULE_DESCRIPTION(DRV_DESCRIPTION); 68MODULE_DESCRIPTION(DRV_DESCRIPTION);
82MODULE_VERSION(DRV_VERSION);
83MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 69MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
84MODULE_LICENSE("GPL"); 70MODULE_LICENSE("GPL");
85 71
@@ -1011,13 +997,11 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
1011 if (priv->lib->bt_params) 997 if (priv->lib->bt_params)
1012 iwlagn_bt_setup_deferred_work(priv); 998 iwlagn_bt_setup_deferred_work(priv);
1013 999
1014 init_timer(&priv->statistics_periodic); 1000 setup_timer(&priv->statistics_periodic, iwl_bg_statistics_periodic,
1015 priv->statistics_periodic.data = (unsigned long)priv; 1001 (unsigned long)priv);
1016 priv->statistics_periodic.function = iwl_bg_statistics_periodic;
1017 1002
1018 init_timer(&priv->ucode_trace); 1003 setup_timer(&priv->ucode_trace, iwl_bg_ucode_trace,
1019 priv->ucode_trace.data = (unsigned long)priv; 1004 (unsigned long)priv);
1020 priv->ucode_trace.function = iwl_bg_ucode_trace;
1021} 1005}
1022 1006
1023void iwl_cancel_deferred_work(struct iwl_priv *priv) 1007void iwl_cancel_deferred_work(struct iwl_priv *priv)
@@ -1244,11 +1228,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1244 trans_cfg.no_reclaim_cmds = no_reclaim_cmds; 1228 trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
1245 trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); 1229 trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
1246 trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K; 1230 trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
1247 if (!iwlwifi_mod_params.wd_disable) 1231 trans_cfg.cmd_q_wdg_timeout = IWL_WATCHDOG_DISABLED;
1248 trans_cfg.queue_watchdog_timeout = 1232
1249 priv->cfg->base_params->wd_timeout;
1250 else
1251 trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
1252 trans_cfg.command_names = iwl_dvm_cmd_strings; 1233 trans_cfg.command_names = iwl_dvm_cmd_strings;
1253 trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM; 1234 trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
1254 1235
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index acb981a0a0aa..c4736c8834c5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -612,15 +612,10 @@ void iwl_tt_initialize(struct iwl_priv *priv)
612 memset(tt, 0, sizeof(struct iwl_tt_mgmt)); 612 memset(tt, 0, sizeof(struct iwl_tt_mgmt));
613 613
614 tt->state = IWL_TI_0; 614 tt->state = IWL_TI_0;
615 init_timer(&priv->thermal_throttle.ct_kill_exit_tm); 615 setup_timer(&priv->thermal_throttle.ct_kill_exit_tm,
616 priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv; 616 iwl_tt_check_exit_ct_kill, (unsigned long)priv);
617 priv->thermal_throttle.ct_kill_exit_tm.function = 617 setup_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
618 iwl_tt_check_exit_ct_kill; 618 iwl_tt_ready_for_ct_kill, (unsigned long)priv);
619 init_timer(&priv->thermal_throttle.ct_kill_waiting_tm);
620 priv->thermal_throttle.ct_kill_waiting_tm.data =
621 (unsigned long)priv;
622 priv->thermal_throttle.ct_kill_waiting_tm.function =
623 iwl_tt_ready_for_ct_kill;
624 /* setup deferred ct kill work */ 619 /* setup deferred ct kill work */
625 INIT_WORK(&priv->tt_work, iwl_bg_tt_work); 620 INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
626 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); 621 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index d1ce3ce13591..1e40a12de077 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -715,7 +715,7 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
715 fifo = ctx->ac_to_fifo[tid_to_ac[tid]]; 715 fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
716 716
717 iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid, 717 iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
718 buf_size, ssn); 718 buf_size, ssn, 0);
719 719
720 /* 720 /*
721 * If the limit is 0, then it wasn't initialised yet, 721 * If the limit is 0, then it wasn't initialised yet,
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index d5cee1530597..4dbef7e58c2e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -267,7 +267,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
267 for (i = 0; i < n_queues; i++) 267 for (i = 0; i < n_queues; i++)
268 if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED) 268 if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED)
269 iwl_trans_ac_txq_enable(priv->trans, i, 269 iwl_trans_ac_txq_enable(priv->trans, i,
270 queue_to_txf[i]); 270 queue_to_txf[i], 0);
271 271
272 priv->passive_no_rx = false; 272 priv->passive_no_rx = false;
273 priv->transport_queue_stop = 0; 273 priv->transport_queue_stop = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index a5f9198d5747..97e38d2e2983 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -92,6 +92,12 @@
92#define IWL7265D_NVM_VERSION 0x0c11 92#define IWL7265D_NVM_VERSION 0x0c11
93#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ 93#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
94 94
95/* DCCM offsets and lengths */
96#define IWL7000_DCCM_OFFSET 0x800000
97#define IWL7260_DCCM_LEN 0x14000
98#define IWL3160_DCCM_LEN 0x10000
99#define IWL7265_DCCM_LEN 0x17A00
100
95#define IWL7260_FW_PRE "iwlwifi-7260-" 101#define IWL7260_FW_PRE "iwlwifi-7260-"
96#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode" 102#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
97 103
@@ -138,7 +144,8 @@ static const struct iwl_ht_params iwl7000_ht_params = {
138 .led_mode = IWL_LED_RF_STATE, \ 144 .led_mode = IWL_LED_RF_STATE, \
139 .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \ 145 .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \
140 .non_shared_ant = ANT_A, \ 146 .non_shared_ant = ANT_A, \
141 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K 147 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
148 .dccm_offset = IWL7000_DCCM_OFFSET
142 149
143const struct iwl_cfg iwl7260_2ac_cfg = { 150const struct iwl_cfg iwl7260_2ac_cfg = {
144 .name = "Intel(R) Dual Band Wireless AC 7260", 151 .name = "Intel(R) Dual Band Wireless AC 7260",
@@ -149,6 +156,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
149 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 156 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
150 .host_interrupt_operation_mode = true, 157 .host_interrupt_operation_mode = true,
151 .lp_xtal_workaround = true, 158 .lp_xtal_workaround = true,
159 .dccm_len = IWL7260_DCCM_LEN,
152}; 160};
153 161
154const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { 162const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
@@ -161,6 +169,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
161 .high_temp = true, 169 .high_temp = true,
162 .host_interrupt_operation_mode = true, 170 .host_interrupt_operation_mode = true,
163 .lp_xtal_workaround = true, 171 .lp_xtal_workaround = true,
172 .dccm_len = IWL7260_DCCM_LEN,
164}; 173};
165 174
166const struct iwl_cfg iwl7260_2n_cfg = { 175const struct iwl_cfg iwl7260_2n_cfg = {
@@ -172,6 +181,7 @@ const struct iwl_cfg iwl7260_2n_cfg = {
172 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 181 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
173 .host_interrupt_operation_mode = true, 182 .host_interrupt_operation_mode = true,
174 .lp_xtal_workaround = true, 183 .lp_xtal_workaround = true,
184 .dccm_len = IWL7260_DCCM_LEN,
175}; 185};
176 186
177const struct iwl_cfg iwl7260_n_cfg = { 187const struct iwl_cfg iwl7260_n_cfg = {
@@ -183,6 +193,7 @@ const struct iwl_cfg iwl7260_n_cfg = {
183 .nvm_calib_ver = IWL7260_TX_POWER_VERSION, 193 .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
184 .host_interrupt_operation_mode = true, 194 .host_interrupt_operation_mode = true,
185 .lp_xtal_workaround = true, 195 .lp_xtal_workaround = true,
196 .dccm_len = IWL7260_DCCM_LEN,
186}; 197};
187 198
188const struct iwl_cfg iwl3160_2ac_cfg = { 199const struct iwl_cfg iwl3160_2ac_cfg = {
@@ -193,6 +204,7 @@ const struct iwl_cfg iwl3160_2ac_cfg = {
193 .nvm_ver = IWL3160_NVM_VERSION, 204 .nvm_ver = IWL3160_NVM_VERSION,
194 .nvm_calib_ver = IWL3160_TX_POWER_VERSION, 205 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
195 .host_interrupt_operation_mode = true, 206 .host_interrupt_operation_mode = true,
207 .dccm_len = IWL3160_DCCM_LEN,
196}; 208};
197 209
198const struct iwl_cfg iwl3160_2n_cfg = { 210const struct iwl_cfg iwl3160_2n_cfg = {
@@ -203,6 +215,7 @@ const struct iwl_cfg iwl3160_2n_cfg = {
203 .nvm_ver = IWL3160_NVM_VERSION, 215 .nvm_ver = IWL3160_NVM_VERSION,
204 .nvm_calib_ver = IWL3160_TX_POWER_VERSION, 216 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
205 .host_interrupt_operation_mode = true, 217 .host_interrupt_operation_mode = true,
218 .dccm_len = IWL3160_DCCM_LEN,
206}; 219};
207 220
208const struct iwl_cfg iwl3160_n_cfg = { 221const struct iwl_cfg iwl3160_n_cfg = {
@@ -213,6 +226,7 @@ const struct iwl_cfg iwl3160_n_cfg = {
213 .nvm_ver = IWL3160_NVM_VERSION, 226 .nvm_ver = IWL3160_NVM_VERSION,
214 .nvm_calib_ver = IWL3160_TX_POWER_VERSION, 227 .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
215 .host_interrupt_operation_mode = true, 228 .host_interrupt_operation_mode = true,
229 .dccm_len = IWL3160_DCCM_LEN,
216}; 230};
217 231
218static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = { 232static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
@@ -240,6 +254,7 @@ const struct iwl_cfg iwl3165_2ac_cfg = {
240 .nvm_ver = IWL3165_NVM_VERSION, 254 .nvm_ver = IWL3165_NVM_VERSION,
241 .nvm_calib_ver = IWL3165_TX_POWER_VERSION, 255 .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
242 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, 256 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
257 .dccm_len = IWL7265_DCCM_LEN,
243}; 258};
244 259
245const struct iwl_cfg iwl7265_2ac_cfg = { 260const struct iwl_cfg iwl7265_2ac_cfg = {
@@ -250,6 +265,7 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
250 .nvm_ver = IWL7265_NVM_VERSION, 265 .nvm_ver = IWL7265_NVM_VERSION,
251 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 266 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
252 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, 267 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
268 .dccm_len = IWL7265_DCCM_LEN,
253}; 269};
254 270
255const struct iwl_cfg iwl7265_2n_cfg = { 271const struct iwl_cfg iwl7265_2n_cfg = {
@@ -260,6 +276,7 @@ const struct iwl_cfg iwl7265_2n_cfg = {
260 .nvm_ver = IWL7265_NVM_VERSION, 276 .nvm_ver = IWL7265_NVM_VERSION,
261 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 277 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
262 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, 278 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
279 .dccm_len = IWL7265_DCCM_LEN,
263}; 280};
264 281
265const struct iwl_cfg iwl7265_n_cfg = { 282const struct iwl_cfg iwl7265_n_cfg = {
@@ -270,6 +287,7 @@ const struct iwl_cfg iwl7265_n_cfg = {
270 .nvm_ver = IWL7265_NVM_VERSION, 287 .nvm_ver = IWL7265_NVM_VERSION,
271 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 288 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
272 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, 289 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
290 .dccm_len = IWL7265_DCCM_LEN,
273}; 291};
274 292
275const struct iwl_cfg iwl7265d_2ac_cfg = { 293const struct iwl_cfg iwl7265d_2ac_cfg = {
@@ -280,6 +298,7 @@ const struct iwl_cfg iwl7265d_2ac_cfg = {
280 .nvm_ver = IWL7265D_NVM_VERSION, 298 .nvm_ver = IWL7265D_NVM_VERSION,
281 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 299 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
282 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, 300 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
301 .dccm_len = IWL7265_DCCM_LEN,
283}; 302};
284 303
285const struct iwl_cfg iwl7265d_2n_cfg = { 304const struct iwl_cfg iwl7265d_2n_cfg = {
@@ -290,6 +309,7 @@ const struct iwl_cfg iwl7265d_2n_cfg = {
290 .nvm_ver = IWL7265D_NVM_VERSION, 309 .nvm_ver = IWL7265D_NVM_VERSION,
291 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 310 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
292 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, 311 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
312 .dccm_len = IWL7265_DCCM_LEN,
293}; 313};
294 314
295const struct iwl_cfg iwl7265d_n_cfg = { 315const struct iwl_cfg iwl7265d_n_cfg = {
@@ -300,6 +320,7 @@ const struct iwl_cfg iwl7265d_n_cfg = {
300 .nvm_ver = IWL7265D_NVM_VERSION, 320 .nvm_ver = IWL7265D_NVM_VERSION,
301 .nvm_calib_ver = IWL7265_TX_POWER_VERSION, 321 .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
302 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, 322 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
323 .dccm_len = IWL7265_DCCM_LEN,
303}; 324};
304 325
305MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 326MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index 3668fc57e770..2f7fe8167dc9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -81,12 +81,21 @@
81#define IWL8000_NVM_VERSION 0x0a1d 81#define IWL8000_NVM_VERSION 0x0a1d
82#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */ 82#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */
83 83
84/* Memory offsets and lengths */
85#define IWL8260_DCCM_OFFSET 0x800000
86#define IWL8260_DCCM_LEN 0x18000
87#define IWL8260_DCCM2_OFFSET 0x880000
88#define IWL8260_DCCM2_LEN 0x8000
89#define IWL8260_SMEM_OFFSET 0x400000
90#define IWL8260_SMEM_LEN 0x68000
91
84#define IWL8000_FW_PRE "iwlwifi-8000" 92#define IWL8000_FW_PRE "iwlwifi-8000"
85#define IWL8000_MODULE_FIRMWARE(api) \ 93#define IWL8000_MODULE_FIRMWARE(api) \
86 IWL8000_FW_PRE "-" __stringify(api) ".ucode" 94 IWL8000_FW_PRE "-" __stringify(api) ".ucode"
87 95
88#define NVM_HW_SECTION_NUM_FAMILY_8000 10 96#define NVM_HW_SECTION_NUM_FAMILY_8000 10
89#define DEFAULT_NVM_FILE_FAMILY_8000 "iwl_nvm_8000.bin" 97#define DEFAULT_NVM_FILE_FAMILY_8000A "iwl_nvm_8000.bin"
98#define DEFAULT_NVM_FILE_FAMILY_8000 "iwl_nvm_8000B.bin"
90 99
91/* Max SDIO RX aggregation size of the ADDBA request/response */ 100/* Max SDIO RX aggregation size of the ADDBA request/response */
92#define MAX_RX_AGG_SIZE_8260_SDIO 28 101#define MAX_RX_AGG_SIZE_8260_SDIO 28
@@ -124,7 +133,13 @@ static const struct iwl_ht_params iwl8000_ht_params = {
124 .led_mode = IWL_LED_RF_STATE, \ 133 .led_mode = IWL_LED_RF_STATE, \
125 .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ 134 .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
126 .d0i3 = true, \ 135 .d0i3 = true, \
127 .non_shared_ant = ANT_A 136 .non_shared_ant = ANT_A, \
137 .dccm_offset = IWL8260_DCCM_OFFSET, \
138 .dccm_len = IWL8260_DCCM_LEN, \
139 .dccm2_offset = IWL8260_DCCM2_OFFSET, \
140 .dccm2_len = IWL8260_DCCM2_LEN, \
141 .smem_offset = IWL8260_SMEM_OFFSET, \
142 .smem_len = IWL8260_SMEM_LEN
128 143
129const struct iwl_cfg iwl8260_2n_cfg = { 144const struct iwl_cfg iwl8260_2n_cfg = {
130 .name = "Intel(R) Dual Band Wireless N 8260", 145 .name = "Intel(R) Dual Band Wireless N 8260",
@@ -145,6 +160,16 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
145 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 160 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
146}; 161};
147 162
163const struct iwl_cfg iwl4165_2ac_cfg = {
164 .name = "Intel(R) Dual Band Wireless AC 4165",
165 .fw_name_pre = IWL8000_FW_PRE,
166 IWL_DEVICE_8000,
167 .ht_params = &iwl8000_ht_params,
168 .nvm_ver = IWL8000_NVM_VERSION,
169 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
170 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
171};
172
148const struct iwl_cfg iwl8260_2ac_sdio_cfg = { 173const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
149 .name = "Intel(R) Dual Band Wireless-AC 8260", 174 .name = "Intel(R) Dual Band Wireless-AC 8260",
150 .fw_name_pre = IWL8000_FW_PRE, 175 .fw_name_pre = IWL8000_FW_PRE,
@@ -153,6 +178,7 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
153 .nvm_ver = IWL8000_NVM_VERSION, 178 .nvm_ver = IWL8000_NVM_VERSION,
154 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 179 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
155 .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000, 180 .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
181 .default_nvm_file_8000A = DEFAULT_NVM_FILE_FAMILY_8000A,
156 .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, 182 .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
157 .disable_dummy_notification = true, 183 .disable_dummy_notification = true,
158 .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO, 184 .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
@@ -167,6 +193,7 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
167 .nvm_ver = IWL8000_NVM_VERSION, 193 .nvm_ver = IWL8000_NVM_VERSION,
168 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 194 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
169 .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000, 195 .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
196 .default_nvm_file_8000A = DEFAULT_NVM_FILE_FAMILY_8000A,
170 .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, 197 .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
171 .bt_shared_single_ant = true, 198 .bt_shared_single_ant = true,
172 .disable_dummy_notification = true, 199 .disable_dummy_notification = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 3a4b9c7fc083..4b190d98a1ec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -126,7 +126,7 @@ enum iwl_led_mode {
126 126
127/* TX queue watchdog timeouts in mSecs */ 127/* TX queue watchdog timeouts in mSecs */
128#define IWL_WATCHDOG_DISABLED 0 128#define IWL_WATCHDOG_DISABLED 0
129#define IWL_DEF_WD_TIMEOUT 2000 129#define IWL_DEF_WD_TIMEOUT 2500
130#define IWL_LONG_WD_TIMEOUT 10000 130#define IWL_LONG_WD_TIMEOUT 10000
131#define IWL_MAX_WD_TIMEOUT 120000 131#define IWL_MAX_WD_TIMEOUT 120000
132 132
@@ -261,6 +261,12 @@ struct iwl_pwr_tx_backoff {
261 * station can receive in HT 261 * station can receive in HT
262 * @max_vht_ampdu_exponent: the exponent of the max length of A-MPDU that the 262 * @max_vht_ampdu_exponent: the exponent of the max length of A-MPDU that the
263 * station can receive in VHT 263 * station can receive in VHT
264 * @dccm_offset: offset from which DCCM begins
265 * @dccm_len: length of DCCM (including runtime stack CCM)
266 * @dccm2_offset: offset from which the second DCCM begins
267 * @dccm2_len: length of the second DCCM
268 * @smem_offset: offset from which the SMEM begins
269 * @smem_len: the length of SMEM
264 * 270 *
265 * We enable the driver to be backward compatible wrt. hardware features. 271 * We enable the driver to be backward compatible wrt. hardware features.
266 * API differences in uCode shouldn't be handled here but through TLVs 272 * API differences in uCode shouldn't be handled here but through TLVs
@@ -298,11 +304,18 @@ struct iwl_cfg {
298 const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; 304 const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
299 bool no_power_up_nic_in_init; 305 bool no_power_up_nic_in_init;
300 const char *default_nvm_file; 306 const char *default_nvm_file;
307 const char *default_nvm_file_8000A;
301 unsigned int max_rx_agg_size; 308 unsigned int max_rx_agg_size;
302 bool disable_dummy_notification; 309 bool disable_dummy_notification;
303 unsigned int max_tx_agg_size; 310 unsigned int max_tx_agg_size;
304 unsigned int max_ht_ampdu_exponent; 311 unsigned int max_ht_ampdu_exponent;
305 unsigned int max_vht_ampdu_exponent; 312 unsigned int max_vht_ampdu_exponent;
313 const u32 dccm_offset;
314 const u32 dccm_len;
315 const u32 dccm2_offset;
316 const u32 dccm2_len;
317 const u32 smem_offset;
318 const u32 smem_len;
306}; 319};
307 320
308/* 321/*
@@ -369,8 +382,8 @@ extern const struct iwl_cfg iwl7265d_2n_cfg;
369extern const struct iwl_cfg iwl7265d_n_cfg; 382extern const struct iwl_cfg iwl7265d_n_cfg;
370extern const struct iwl_cfg iwl8260_2n_cfg; 383extern const struct iwl_cfg iwl8260_2n_cfg;
371extern const struct iwl_cfg iwl8260_2ac_cfg; 384extern const struct iwl_cfg iwl8260_2ac_cfg;
385extern const struct iwl_cfg iwl4165_2ac_cfg;
372extern const struct iwl_cfg iwl8260_2ac_sdio_cfg; 386extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
373extern const struct iwl_cfg iwl4265_2ac_sdio_cfg;
374extern const struct iwl_cfg iwl4165_2ac_sdio_cfg; 387extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
375#endif /* CONFIG_IWLMVM */ 388#endif /* CONFIG_IWLMVM */
376 389
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index aff63c3f5bf8..faa17f2e352a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -184,6 +184,7 @@
184#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ 184#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
185#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ 185#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
186#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ 186#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
187#define CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000)
187#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */ 188#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
188 189
189#define CSR_MBOX_SET_REG_OS_ALIVE BIT(5) 190#define CSR_MBOX_SET_REG_OS_ALIVE BIT(5)
@@ -306,6 +307,7 @@
306enum { 307enum {
307 SILICON_A_STEP = 0, 308 SILICON_A_STEP = 0,
308 SILICON_B_STEP, 309 SILICON_B_STEP,
310 SILICON_C_STEP,
309}; 311};
310 312
311 313
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 850b85a47806..996e7f16adf9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -84,21 +84,8 @@
84 * 84 *
85 ******************************************************************************/ 85 ******************************************************************************/
86 86
87/*
88 * module name, copyright, version, etc.
89 */
90#define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux" 87#define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux"
91
92#ifdef CONFIG_IWLWIFI_DEBUG
93#define VD "d"
94#else
95#define VD
96#endif
97
98#define DRV_VERSION IWLWIFI_VERSION VD
99
100MODULE_DESCRIPTION(DRV_DESCRIPTION); 88MODULE_DESCRIPTION(DRV_DESCRIPTION);
101MODULE_VERSION(DRV_VERSION);
102MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 89MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
103MODULE_LICENSE("GPL"); 90MODULE_LICENSE("GPL");
104 91
@@ -250,9 +237,6 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
250 /* 237 /*
251 * Starting 8000B - FW name format has changed. This overwrites the 238 * Starting 8000B - FW name format has changed. This overwrites the
252 * previous name and uses the new format. 239 * previous name and uses the new format.
253 *
254 * TODO:
255 * Once there is only one supported step for 8000 family - delete this!
256 */ 240 */
257 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 241 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
258 char rev_step[2] = { 242 char rev_step[2] = {
@@ -263,13 +247,6 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
263 if (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP) 247 if (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP)
264 rev_step[0] = 0; 248 rev_step[0] = 0;
265 249
266 /*
267 * If hw_rev wasn't set yet - default as B-step. If it IS A-step
268 * we'll reload that FW later instead.
269 */
270 if (drv->trans->hw_rev == 0)
271 rev_step[0] = 'B';
272
273 snprintf(drv->firmware_name, sizeof(drv->firmware_name), 250 snprintf(drv->firmware_name, sizeof(drv->firmware_name),
274 "%s%s-%s.ucode", name_pre, rev_step, tag); 251 "%s%s-%s.ucode", name_pre, rev_step, tag);
275 } 252 }
@@ -926,6 +903,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
926 IWL_UCODE_REGULAR_USNIFFER, 903 IWL_UCODE_REGULAR_USNIFFER,
927 tlv_len); 904 tlv_len);
928 break; 905 break;
906 case IWL_UCODE_TLV_SDIO_ADMA_ADDR:
907 if (tlv_len != sizeof(u32))
908 goto invalid_tlv_len;
909 drv->fw.sdio_adma_addr =
910 le32_to_cpup((__le32 *)tlv_data);
911 break;
929 default: 912 default:
930 IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); 913 IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
931 break; 914 break;
@@ -1082,7 +1065,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1082 u32 api_ver; 1065 u32 api_ver;
1083 int i; 1066 int i;
1084 bool load_module = false; 1067 bool load_module = false;
1085 u32 hw_rev = drv->trans->hw_rev;
1086 1068
1087 fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH; 1069 fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
1088 fw->ucode_capa.standard_phy_calibration_size = 1070 fw->ucode_capa.standard_phy_calibration_size =
@@ -1275,50 +1257,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1275 op->name, err); 1257 op->name, err);
1276#endif 1258#endif
1277 } 1259 }
1278
1279 /*
1280 * We may have loaded the wrong FW file in 8000 HW family if it is an
1281 * A-step card, and if drv->trans->hw_rev wasn't properly read when
1282 * the FW file had been loaded. (This might happen in SDIO.) In such a
1283 * case - unload and reload the correct file.
1284 *
1285 * TODO:
1286 * Once there is only one supported step for 8000 family - delete this!
1287 */
1288 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1289 CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_A_STEP &&
1290 drv->trans->hw_rev != hw_rev) {
1291 char firmware_name[32];
1292
1293 /* Free previous FW resources */
1294 if (drv->op_mode)
1295 _iwl_op_mode_stop(drv);
1296 iwl_dealloc_ucode(drv);
1297
1298 /* Build name of correct-step FW */
1299 snprintf(firmware_name, sizeof(firmware_name),
1300 strrchr(drv->firmware_name, '-'));
1301 snprintf(drv->firmware_name, sizeof(drv->firmware_name),
1302 "%s%s", drv->cfg->fw_name_pre, firmware_name);
1303
1304 /* Clear data before loading correct FW */
1305 list_del(&drv->list);
1306
1307 /* Request correct FW file this time */
1308 IWL_DEBUG_INFO(drv, "attempting to load A-step FW %s\n",
1309 drv->firmware_name);
1310 err = request_firmware(&ucode_raw, drv->firmware_name,
1311 drv->trans->dev);
1312 if (err) {
1313 IWL_ERR(drv, "Failed swapping FW!\n");
1314 goto out_unbind;
1315 }
1316
1317 /* Redo callback function - this time with right FW */
1318 iwl_req_fw_callback(ucode_raw, context);
1319 }
1320
1321 kfree(pieces);
1322 return; 1260 return;
1323 1261
1324 try_again: 1262 try_again:
@@ -1429,7 +1367,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
1429 .restart_fw = true, 1367 .restart_fw = true,
1430 .bt_coex_active = true, 1368 .bt_coex_active = true,
1431 .power_level = IWL_POWER_INDEX_1, 1369 .power_level = IWL_POWER_INDEX_1,
1432 .wd_disable = true, 1370 .d0i3_disable = true,
1433#ifndef CONFIG_IWLWIFI_UAPSD 1371#ifndef CONFIG_IWLWIFI_UAPSD
1434 .uapsd_disable = true, 1372 .uapsd_disable = true,
1435#endif /* CONFIG_IWLWIFI_UAPSD */ 1373#endif /* CONFIG_IWLWIFI_UAPSD */
@@ -1492,7 +1430,7 @@ static int __init iwl_drv_init(void)
1492 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) 1430 for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++)
1493 INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv); 1431 INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);
1494 1432
1495 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); 1433 pr_info(DRV_DESCRIPTION "\n");
1496 pr_info(DRV_COPYRIGHT "\n"); 1434 pr_info(DRV_COPYRIGHT "\n");
1497 1435
1498#ifdef CONFIG_IWLWIFI_DEBUGFS 1436#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1539,15 +1477,15 @@ module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
1539MODULE_PARM_DESC(antenna_coupling, 1477MODULE_PARM_DESC(antenna_coupling,
1540 "specify antenna coupling in dB (default: 0 dB)"); 1478 "specify antenna coupling in dB (default: 0 dB)");
1541 1479
1542module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
1543MODULE_PARM_DESC(wd_disable,
1544 "Disable stuck queue watchdog timer 0=system default, 1=disable (default: 1)");
1545
1546module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO); 1480module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
1547MODULE_PARM_DESC(nvm_file, "NVM file name"); 1481MODULE_PARM_DESC(nvm_file, "NVM file name");
1548 1482
1549module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, 1483module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable,
1550 bool, S_IRUGO); 1484 bool, S_IRUGO);
1485MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)");
1486
1487module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
1488 bool, S_IRUGO | S_IWUSR);
1551#ifdef CONFIG_IWLWIFI_UAPSD 1489#ifdef CONFIG_IWLWIFI_UAPSD
1552MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)"); 1490MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
1553#else 1491#else
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index be4f8972241a..adf522c756e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -68,7 +68,6 @@
68 68
69/* for all modules */ 69/* for all modules */
70#define DRV_NAME "iwlwifi" 70#define DRV_NAME "iwlwifi"
71#define IWLWIFI_VERSION "in-tree:"
72#define DRV_COPYRIGHT "Copyright(c) 2003- 2014 Intel Corporation" 71#define DRV_COPYRIGHT "Copyright(c) 2003- 2014 Intel Corporation"
73#define DRV_AUTHOR "<ilw@linux.intel.com>" 72#define DRV_AUTHOR "<ilw@linux.intel.com>"
74 73
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index 20a8a64c9fe3..919a2548a92c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -71,7 +71,6 @@
71 71
72/** 72/**
73 * enum iwl_fw_error_dump_type - types of data in the dump file 73 * enum iwl_fw_error_dump_type - types of data in the dump file
74 * @IWL_FW_ERROR_DUMP_SRAM:
75 * @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0 74 * @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0
76 * @IWL_FW_ERROR_DUMP_RXF: 75 * @IWL_FW_ERROR_DUMP_RXF:
77 * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as 76 * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
@@ -82,9 +81,10 @@
82 * @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several 81 * @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several
83 * sections like this in a single file. 82 * sections like this in a single file.
84 * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers 83 * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
84 * @IWL_FW_ERROR_DUMP_MEM: chunk of memory
85 */ 85 */
86enum iwl_fw_error_dump_type { 86enum iwl_fw_error_dump_type {
87 IWL_FW_ERROR_DUMP_SRAM = 0, 87 /* 0 is deprecated */
88 IWL_FW_ERROR_DUMP_CSR = 1, 88 IWL_FW_ERROR_DUMP_CSR = 1,
89 IWL_FW_ERROR_DUMP_RXF = 2, 89 IWL_FW_ERROR_DUMP_RXF = 2,
90 IWL_FW_ERROR_DUMP_TXCMD = 3, 90 IWL_FW_ERROR_DUMP_TXCMD = 3,
@@ -93,6 +93,7 @@ enum iwl_fw_error_dump_type {
93 IWL_FW_ERROR_DUMP_PRPH = 6, 93 IWL_FW_ERROR_DUMP_PRPH = 6,
94 IWL_FW_ERROR_DUMP_TXF = 7, 94 IWL_FW_ERROR_DUMP_TXF = 7,
95 IWL_FW_ERROR_DUMP_FH_REGS = 8, 95 IWL_FW_ERROR_DUMP_FH_REGS = 8,
96 IWL_FW_ERROR_DUMP_MEM = 9,
96 97
97 IWL_FW_ERROR_DUMP_MAX, 98 IWL_FW_ERROR_DUMP_MAX,
98}; 99};
@@ -133,6 +134,27 @@ struct iwl_fw_error_dump_txcmd {
133 u8 data[]; 134 u8 data[];
134} __packed; 135} __packed;
135 136
137/**
138 * struct iwl_fw_error_dump_fifo - RX/TX FIFO data
139 * @fifo_num: number of FIFO (starting from 0)
140 * @available_bytes: num of bytes available in FIFO (may be less than FIFO size)
141 * @wr_ptr: position of write pointer
142 * @rd_ptr: position of read pointer
143 * @fence_ptr: position of fence pointer
144 * @fence_mode: the current mode of the fence (before locking) -
145 * 0=follow RD pointer ; 1 = freeze
146 * @data: all of the FIFO's data
147 */
148struct iwl_fw_error_dump_fifo {
149 __le32 fifo_num;
150 __le32 available_bytes;
151 __le32 wr_ptr;
152 __le32 rd_ptr;
153 __le32 fence_ptr;
154 __le32 fence_mode;
155 u8 data[];
156} __packed;
157
136enum iwl_fw_error_dump_family { 158enum iwl_fw_error_dump_family {
137 IWL_FW_ERROR_DUMP_FAMILY_7 = 7, 159 IWL_FW_ERROR_DUMP_FAMILY_7 = 7,
138 IWL_FW_ERROR_DUMP_FAMILY_8 = 8, 160 IWL_FW_ERROR_DUMP_FAMILY_8 = 8,
@@ -180,6 +202,23 @@ struct iwl_fw_error_dump_prph {
180 __le32 data[]; 202 __le32 data[];
181}; 203};
182 204
205enum iwl_fw_error_dump_mem_type {
206 IWL_FW_ERROR_DUMP_MEM_SRAM,
207 IWL_FW_ERROR_DUMP_MEM_SMEM,
208};
209
210/**
211 * struct iwl_fw_error_dump_mem - chunk of memory
212 * @type: %enum iwl_fw_error_dump_mem_type
213 * @offset: the offset from which the memory was read
214 * @data: the content of the memory
215 */
216struct iwl_fw_error_dump_mem {
217 __le32 type;
218 __le32 offset;
219 u8 data[];
220};
221
183/** 222/**
184 * iwl_fw_error_next_data - advance fw error dump data pointer 223 * iwl_fw_error_next_data - advance fw error dump data pointer
185 * @data: previous data block 224 * @data: previous data block
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 660ddb1b7d8a..016d91384681 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -132,6 +132,7 @@ enum iwl_ucode_tlv_type {
132 IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30, 132 IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30,
133 IWL_UCODE_TLV_N_SCAN_CHANNELS = 31, 133 IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
134 IWL_UCODE_TLV_SEC_RT_USNIFFER = 34, 134 IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
135 IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35,
135 IWL_UCODE_TLV_FW_DBG_DEST = 38, 136 IWL_UCODE_TLV_FW_DBG_DEST = 38,
136 IWL_UCODE_TLV_FW_DBG_CONF = 39, 137 IWL_UCODE_TLV_FW_DBG_CONF = 39,
137}; 138};
@@ -234,31 +235,34 @@ enum iwl_ucode_tlv_flag {
234 235
235/** 236/**
236 * enum iwl_ucode_tlv_api - ucode api 237 * enum iwl_ucode_tlv_api - ucode api
237 * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
238 * @IWL_UCODE_TLV_CAPA_EXTENDED_BEACON: Support Extended beacon notification
239 * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex 238 * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
240 * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
241 * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit. 239 * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
242 * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API. 240 * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API.
243 * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif. 241 * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
244 * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time 242 * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
245 * longer than the passive one, which is essential for fragmented scan. 243 * longer than the passive one, which is essential for fragmented scan.
244 * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
246 * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, 245 * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
247 * regardless of the band or the number of the probes. FW will calculate 246 * regardless of the band or the number of the probes. FW will calculate
248 * the actual dwell time. 247 * the actual dwell time.
248 * @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler
249 * through the dedicated host command.
249 * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too. 250 * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
251 * @IWL_UCODE_TLV_API_ASYNC_DTM: Async temperature notifications are supported.
252 * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
250 */ 253 */
251enum iwl_ucode_tlv_api { 254enum iwl_ucode_tlv_api {
252 IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
253 IWL_UCODE_TLV_CAPA_EXTENDED_BEACON = BIT(1),
254 IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3), 255 IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
255 IWL_UCODE_TLV_API_CSA_FLOW = BIT(4),
256 IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5), 256 IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5),
257 IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6), 257 IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
258 IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), 258 IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
259 IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), 259 IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
260 IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10),
260 IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), 261 IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
262 IWL_UCODE_TLV_API_SCD_CFG = BIT(15),
261 IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16), 263 IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
264 IWL_UCODE_TLV_API_ASYNC_DTM = BIT(17),
265 IWL_UCODE_TLV_API_LQ_SS_PARAMS = BIT(18),
262}; 266};
263 267
264/** 268/**
@@ -266,6 +270,7 @@ enum iwl_ucode_tlv_api {
266 * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3 270 * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
267 * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory 271 * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
268 * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan. 272 * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
273 * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
269 * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality 274 * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
270 * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current 275 * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
271 * tx power value into TPC Report action frame and Link Measurement Report 276 * tx power value into TPC Report action frame and Link Measurement Report
@@ -284,6 +289,7 @@ enum iwl_ucode_tlv_capa {
284 IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), 289 IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
285 IWL_UCODE_TLV_CAPA_LAR_SUPPORT = BIT(1), 290 IWL_UCODE_TLV_CAPA_LAR_SUPPORT = BIT(1),
286 IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2), 291 IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2),
292 IWL_UCODE_TLV_CAPA_BEAMFORMER = BIT(3),
287 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = BIT(6), 293 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = BIT(6),
288 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8), 294 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8),
289 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9), 295 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index e6dc3b870949..ffd785cc67d6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -152,6 +152,8 @@ struct iwl_fw_cscheme_list {
152 * @mvm_fw: indicates this is MVM firmware 152 * @mvm_fw: indicates this is MVM firmware
153 * @cipher_scheme: optional external cipher scheme. 153 * @cipher_scheme: optional external cipher scheme.
154 * @human_readable: human readable version 154 * @human_readable: human readable version
155 * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
156 * we get the ALIVE from the uCode
155 * @dbg_dest_tlv: points to the destination TLV for debug 157 * @dbg_dest_tlv: points to the destination TLV for debug
156 * @dbg_conf_tlv: array of pointers to configuration TLVs for debug 158 * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
157 * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries 159 * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
@@ -181,6 +183,8 @@ struct iwl_fw {
181 struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; 183 struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
182 u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; 184 u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
183 185
186 u32 sdio_adma_addr;
187
184 struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; 188 struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
185 struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX]; 189 struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
186 size_t dbg_conf_tlv_len[FW_DBG_MAX]; 190 size_t dbg_conf_tlv_len[FW_DBG_MAX];
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 7a2cbf6f90db..03250a45272e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -193,11 +193,15 @@ void iwl_force_nmi(struct iwl_trans *trans)
193 * DEVICE_SET_NMI_8000B_REG - is used. 193 * DEVICE_SET_NMI_8000B_REG - is used.
194 */ 194 */
195 if ((trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) || 195 if ((trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) ||
196 (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP)) 196 (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP)) {
197 iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL); 197 iwl_write_prph(trans, DEVICE_SET_NMI_REG,
198 else 198 DEVICE_SET_NMI_VAL_DRV);
199 iwl_write_prph(trans, DEVICE_SET_NMI_REG,
200 DEVICE_SET_NMI_VAL_HW);
201 } else {
199 iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG, 202 iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG,
200 DEVICE_SET_NMI_8000B_VAL); 203 DEVICE_SET_NMI_8000B_VAL);
204 }
201} 205}
202IWL_EXPORT_SYMBOL(iwl_force_nmi); 206IWL_EXPORT_SYMBOL(iwl_force_nmi);
203 207
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index 71507cf490e6..e8eabd21ccfe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -96,13 +96,13 @@ enum iwl_disable_11n {
96 * use IWL_[DIS,EN]ABLE_HT_* constants 96 * use IWL_[DIS,EN]ABLE_HT_* constants
97 * @amsdu_size_8K: enable 8K amsdu size, default = 0 97 * @amsdu_size_8K: enable 8K amsdu size, default = 0
98 * @restart_fw: restart firmware, default = 1 98 * @restart_fw: restart firmware, default = 1
99 * @wd_disable: disable stuck queue check, default = 1
100 * @bt_coex_active: enable bt coex, default = true 99 * @bt_coex_active: enable bt coex, default = true
101 * @led_mode: system default, default = 0 100 * @led_mode: system default, default = 0
102 * @power_save: enable power save, default = false 101 * @power_save: enable power save, default = false
103 * @power_level: power level, default = 1 102 * @power_level: power level, default = 1
104 * @debug_level: levels are IWL_DL_* 103 * @debug_level: levels are IWL_DL_*
105 * @ant_coupling: antenna coupling in dB, default = 0 104 * @ant_coupling: antenna coupling in dB, default = 0
105 * @d0i3_disable: disable d0i3, default = 1,
106 * @fw_monitor: allow to use firmware monitor 106 * @fw_monitor: allow to use firmware monitor
107 */ 107 */
108struct iwl_mod_params { 108struct iwl_mod_params {
@@ -110,7 +110,6 @@ struct iwl_mod_params {
110 unsigned int disable_11n; 110 unsigned int disable_11n;
111 int amsdu_size_8K; 111 int amsdu_size_8K;
112 bool restart_fw; 112 bool restart_fw;
113 int wd_disable;
114 bool bt_coex_active; 113 bool bt_coex_active;
115 int led_mode; 114 int led_mode;
116 bool power_save; 115 bool power_save;
@@ -121,6 +120,7 @@ struct iwl_mod_params {
121 int ant_coupling; 120 int ant_coupling;
122 char *nvm_file; 121 char *nvm_file;
123 bool uapsd_disable; 122 bool uapsd_disable;
123 bool d0i3_disable;
124 bool fw_monitor; 124 bool fw_monitor;
125}; 125};
126 126
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 06e02fcd6f7b..c74f1a4edf23 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -468,6 +468,8 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
468 data->radio_cfg_step = NVM_RF_CFG_STEP_MSK_FAMILY_8000(radio_cfg); 468 data->radio_cfg_step = NVM_RF_CFG_STEP_MSK_FAMILY_8000(radio_cfg);
469 data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK_FAMILY_8000(radio_cfg); 469 data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK_FAMILY_8000(radio_cfg);
470 data->radio_cfg_pnum = NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(radio_cfg); 470 data->radio_cfg_pnum = NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(radio_cfg);
471 data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(radio_cfg);
472 data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(radio_cfg);
471} 473}
472 474
473static void iwl_set_hw_address(const struct iwl_cfg *cfg, 475static void iwl_set_hw_address(const struct iwl_cfg *cfg,
@@ -592,6 +594,10 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
592 594
593 radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw); 595 radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw);
594 iwl_set_radio_cfg(cfg, data, radio_cfg); 596 iwl_set_radio_cfg(cfg, data, radio_cfg);
597 if (data->valid_tx_ant)
598 tx_chains &= data->valid_tx_ant;
599 if (data->valid_rx_ant)
600 rx_chains &= data->valid_rx_ant;
595 601
596 sku = iwl_get_sku(cfg, nvm_sw); 602 sku = iwl_get_sku(cfg, nvm_sw);
597 data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ; 603 data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 2df51eab1348..6221e4dfc64f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -99,6 +99,7 @@
99 99
100#define APMG_PCIDEV_STT_VAL_PERSIST_DIS (0x00000200) 100#define APMG_PCIDEV_STT_VAL_PERSIST_DIS (0x00000200)
101#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) 101#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
102#define APMG_PCIDEV_STT_VAL_WAKE_ME (0x00004000)
102 103
103#define APMG_RTC_INT_STT_RFKILL (0x10000000) 104#define APMG_RTC_INT_STT_RFKILL (0x10000000)
104 105
@@ -107,7 +108,8 @@
107 108
108/* Device NMI register */ 109/* Device NMI register */
109#define DEVICE_SET_NMI_REG 0x00a01c30 110#define DEVICE_SET_NMI_REG 0x00a01c30
110#define DEVICE_SET_NMI_VAL 0x1 111#define DEVICE_SET_NMI_VAL_HW BIT(0)
112#define DEVICE_SET_NMI_VAL_DRV BIT(7)
111#define DEVICE_SET_NMI_8000B_REG 0x00a01c24 113#define DEVICE_SET_NMI_8000B_REG 0x00a01c24
112#define DEVICE_SET_NMI_8000B_VAL 0x1000000 114#define DEVICE_SET_NMI_8000B_VAL 0x1000000
113 115
@@ -250,6 +252,7 @@
250#define SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F) 252#define SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
251#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) 253#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
252#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) 254#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
255#define SCD_GP_CTRL_ENABLE_31_QUEUES BIT(0)
253 256
254/* Context Data */ 257/* Context Data */
255#define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600) 258#define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600)
@@ -283,32 +286,9 @@
283#define SCD_CHAINEXT_EN (SCD_BASE + 0x244) 286#define SCD_CHAINEXT_EN (SCD_BASE + 0x244)
284#define SCD_AGGR_SEL (SCD_BASE + 0x248) 287#define SCD_AGGR_SEL (SCD_BASE + 0x248)
285#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) 288#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
289#define SCD_GP_CTRL (SCD_BASE + 0x1a8)
286#define SCD_EN_CTRL (SCD_BASE + 0x254) 290#define SCD_EN_CTRL (SCD_BASE + 0x254)
287 291
288static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
289{
290 if (chnl < 20)
291 return SCD_BASE + 0x18 + chnl * 4;
292 WARN_ON_ONCE(chnl >= 32);
293 return SCD_BASE + 0x284 + (chnl - 20) * 4;
294}
295
296static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
297{
298 if (chnl < 20)
299 return SCD_BASE + 0x68 + chnl * 4;
300 WARN_ON_ONCE(chnl >= 32);
301 return SCD_BASE + 0x2B4 + (chnl - 20) * 4;
302}
303
304static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
305{
306 if (chnl < 20)
307 return SCD_BASE + 0x10c + chnl * 4;
308 WARN_ON_ONCE(chnl >= 32);
309 return SCD_BASE + 0x384 + (chnl - 20) * 4;
310}
311
312/*********************** END TX SCHEDULER *************************************/ 292/*********************** END TX SCHEDULER *************************************/
313 293
314/* Oscillator clock */ 294/* Oscillator clock */
@@ -358,18 +338,40 @@ enum secure_load_status_reg {
358 338
359/* Rx FIFO */ 339/* Rx FIFO */
360#define RXF_SIZE_ADDR (0xa00c88) 340#define RXF_SIZE_ADDR (0xa00c88)
341#define RXF_RD_D_SPACE (0xa00c40)
342#define RXF_RD_WR_PTR (0xa00c50)
343#define RXF_RD_RD_PTR (0xa00c54)
344#define RXF_RD_FENCE_PTR (0xa00c4c)
345#define RXF_SET_FENCE_MODE (0xa00c14)
346#define RXF_LD_WR2FENCE (0xa00c1c)
347#define RXF_FIFO_RD_FENCE_INC (0xa00c68)
361#define RXF_SIZE_BYTE_CND_POS (7) 348#define RXF_SIZE_BYTE_CND_POS (7)
362#define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS) 349#define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS)
350#define RXF_DIFF_FROM_PREV (0x200)
363 351
364#define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10) 352#define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10)
365#define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c) 353#define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c)
366 354
355/* Tx FIFO */
356#define TXF_FIFO_ITEM_CNT (0xa00438)
357#define TXF_WR_PTR (0xa00414)
358#define TXF_RD_PTR (0xa00410)
359#define TXF_FENCE_PTR (0xa00418)
360#define TXF_LOCK_FENCE (0xa00424)
361#define TXF_LARC_NUM (0xa0043c)
362#define TXF_READ_MODIFY_DATA (0xa00448)
363#define TXF_READ_MODIFY_ADDR (0xa0044c)
364
367/* FW monitor */ 365/* FW monitor */
366#define MON_BUFF_SAMPLE_CTL (0xa03c00)
368#define MON_BUFF_BASE_ADDR (0xa03c3c) 367#define MON_BUFF_BASE_ADDR (0xa03c3c)
369#define MON_BUFF_END_ADDR (0xa03c40) 368#define MON_BUFF_END_ADDR (0xa03c40)
370#define MON_BUFF_WRPTR (0xa03c44) 369#define MON_BUFF_WRPTR (0xa03c44)
371#define MON_BUFF_CYCLE_CNT (0xa03c48) 370#define MON_BUFF_CYCLE_CNT (0xa03c48)
372 371
372#define DBGC_IN_SAMPLE (0xa03c00)
373#define DBGC_OUT_CTRL (0xa03c0c)
374
373/* FW chicken bits */ 375/* FW chicken bits */
374#define LMPM_CHICK 0xA01FF8 376#define LMPM_CHICK 0xA01FF8
375enum { 377enum {
diff --git a/drivers/net/wireless/iwlwifi/iwl-scd.h b/drivers/net/wireless/iwlwifi/iwl-scd.h
index 6c622b21bba7..f2353ebf2666 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scd.h
+++ b/drivers/net/wireless/iwlwifi/iwl-scd.h
@@ -69,14 +69,6 @@
69#include "iwl-prph.h" 69#include "iwl-prph.h"
70 70
71 71
72static inline void iwl_scd_txq_set_inactive(struct iwl_trans *trans,
73 u16 txq_id)
74{
75 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
76 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
77 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
78}
79
80static inline void iwl_scd_txq_set_chain(struct iwl_trans *trans, 72static inline void iwl_scd_txq_set_chain(struct iwl_trans *trans,
81 u16 txq_id) 73 u16 txq_id)
82{ 74{
@@ -115,4 +107,37 @@ static inline void iwl_scd_enable_set_active(struct iwl_trans *trans,
115{ 107{
116 iwl_write_prph(trans, SCD_EN_CTRL, value); 108 iwl_write_prph(trans, SCD_EN_CTRL, value);
117} 109}
110
111static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
112{
113 if (chnl < 20)
114 return SCD_BASE + 0x18 + chnl * 4;
115 WARN_ON_ONCE(chnl >= 32);
116 return SCD_BASE + 0x284 + (chnl - 20) * 4;
117}
118
119static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
120{
121 if (chnl < 20)
122 return SCD_BASE + 0x68 + chnl * 4;
123 WARN_ON_ONCE(chnl >= 32);
124 return SCD_BASE + 0x2B4 + chnl * 4;
125}
126
127static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
128{
129 if (chnl < 20)
130 return SCD_BASE + 0x10c + chnl * 4;
131 WARN_ON_ONCE(chnl >= 32);
132 return SCD_BASE + 0x334 + chnl * 4;
133}
134
135static inline void iwl_scd_txq_set_inactive(struct iwl_trans *trans,
136 u16 txq_id)
137{
138 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
139 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
140 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
141}
142
118#endif 143#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 028408a6ecba..a96bd8db6ceb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -368,6 +368,7 @@ enum iwl_trans_status {
368 * @cmd_queue: the index of the command queue. 368 * @cmd_queue: the index of the command queue.
369 * Must be set before start_fw. 369 * Must be set before start_fw.
370 * @cmd_fifo: the fifo for host commands 370 * @cmd_fifo: the fifo for host commands
371 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
371 * @no_reclaim_cmds: Some devices erroneously don't set the 372 * @no_reclaim_cmds: Some devices erroneously don't set the
372 * SEQ_RX_FRAME bit on some notifications, this is the 373 * SEQ_RX_FRAME bit on some notifications, this is the
373 * list of such notifications to filter. Max length is 374 * list of such notifications to filter. Max length is
@@ -378,24 +379,26 @@ enum iwl_trans_status {
378 * @bc_table_dword: set to true if the BC table expects the byte count to be 379 * @bc_table_dword: set to true if the BC table expects the byte count to be
379 * in DWORD (as opposed to bytes) 380 * in DWORD (as opposed to bytes)
380 * @scd_set_active: should the transport configure the SCD for HCMD queue 381 * @scd_set_active: should the transport configure the SCD for HCMD queue
381 * @queue_watchdog_timeout: time (in ms) after which queues
382 * are considered stuck and will trigger device restart
383 * @command_names: array of command names, must be 256 entries 382 * @command_names: array of command names, must be 256 entries
384 * (one for each command); for debugging only 383 * (one for each command); for debugging only
384 * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
385 * we get the ALIVE from the uCode
385 */ 386 */
386struct iwl_trans_config { 387struct iwl_trans_config {
387 struct iwl_op_mode *op_mode; 388 struct iwl_op_mode *op_mode;
388 389
389 u8 cmd_queue; 390 u8 cmd_queue;
390 u8 cmd_fifo; 391 u8 cmd_fifo;
392 unsigned int cmd_q_wdg_timeout;
391 const u8 *no_reclaim_cmds; 393 const u8 *no_reclaim_cmds;
392 unsigned int n_no_reclaim_cmds; 394 unsigned int n_no_reclaim_cmds;
393 395
394 bool rx_buf_size_8k; 396 bool rx_buf_size_8k;
395 bool bc_table_dword; 397 bool bc_table_dword;
396 bool scd_set_active; 398 bool scd_set_active;
397 unsigned int queue_watchdog_timeout;
398 const char *const *command_names; 399 const char *const *command_names;
400
401 u32 sdio_adma_addr;
399}; 402};
400 403
401struct iwl_trans_dump_data { 404struct iwl_trans_dump_data {
@@ -507,7 +510,8 @@ struct iwl_trans_ops {
507 struct sk_buff_head *skbs); 510 struct sk_buff_head *skbs);
508 511
509 void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn, 512 void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
510 const struct iwl_trans_txq_scd_cfg *cfg); 513 const struct iwl_trans_txq_scd_cfg *cfg,
514 unsigned int queue_wdg_timeout);
511 void (*txq_disable)(struct iwl_trans *trans, int queue, 515 void (*txq_disable)(struct iwl_trans *trans, int queue,
512 bool configure_scd); 516 bool configure_scd);
513 517
@@ -552,6 +556,21 @@ enum iwl_trans_state {
552}; 556};
553 557
554/** 558/**
559 * enum iwl_d0i3_mode - d0i3 mode
560 *
561 * @IWL_D0I3_MODE_OFF - d0i3 is disabled
562 * @IWL_D0I3_MODE_ON_IDLE - enter d0i3 when device is idle
563 * (e.g. no active references)
564 * @IWL_D0I3_MODE_ON_SUSPEND - enter d0i3 only on suspend
565 * (in case of 'any' trigger)
566 */
567enum iwl_d0i3_mode {
568 IWL_D0I3_MODE_OFF = 0,
569 IWL_D0I3_MODE_ON_IDLE,
570 IWL_D0I3_MODE_ON_SUSPEND,
571};
572
573/**
555 * struct iwl_trans - transport common data 574 * struct iwl_trans - transport common data
556 * 575 *
557 * @ops - pointer to iwl_trans_ops 576 * @ops - pointer to iwl_trans_ops
@@ -612,6 +631,8 @@ struct iwl_trans {
612 const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX]; 631 const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_MAX];
613 u8 dbg_dest_reg_num; 632 u8 dbg_dest_reg_num;
614 633
634 enum iwl_d0i3_mode d0i3_mode;
635
615 /* pointer to trans specific struct */ 636 /* pointer to trans specific struct */
616 /*Ensure that this pointer will always be aligned to sizeof pointer */ 637 /*Ensure that this pointer will always be aligned to sizeof pointer */
617 char trans_specific[0] __aligned(sizeof(void *)); 638 char trans_specific[0] __aligned(sizeof(void *));
@@ -808,19 +829,21 @@ static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
808 829
809static inline void 830static inline void
810iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, 831iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
811 const struct iwl_trans_txq_scd_cfg *cfg) 832 const struct iwl_trans_txq_scd_cfg *cfg,
833 unsigned int queue_wdg_timeout)
812{ 834{
813 might_sleep(); 835 might_sleep();
814 836
815 if (unlikely((trans->state != IWL_TRANS_FW_ALIVE))) 837 if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
816 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); 838 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
817 839
818 trans->ops->txq_enable(trans, queue, ssn, cfg); 840 trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
819} 841}
820 842
821static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, 843static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
822 int fifo, int sta_id, int tid, 844 int fifo, int sta_id, int tid,
823 int frame_limit, u16 ssn) 845 int frame_limit, u16 ssn,
846 unsigned int queue_wdg_timeout)
824{ 847{
825 struct iwl_trans_txq_scd_cfg cfg = { 848 struct iwl_trans_txq_scd_cfg cfg = {
826 .fifo = fifo, 849 .fifo = fifo,
@@ -830,11 +853,12 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
830 .aggregate = sta_id >= 0, 853 .aggregate = sta_id >= 0,
831 }; 854 };
832 855
833 iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg); 856 iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
834} 857}
835 858
836static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, 859static inline
837 int fifo) 860void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
861 unsigned int queue_wdg_timeout)
838{ 862{
839 struct iwl_trans_txq_scd_cfg cfg = { 863 struct iwl_trans_txq_scd_cfg cfg = {
840 .fifo = fifo, 864 .fifo = fifo,
@@ -844,16 +868,16 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
844 .aggregate = false, 868 .aggregate = false,
845 }; 869 };
846 870
847 iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg); 871 iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
848} 872}
849 873
850static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans, 874static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
851 u32 txq_bm) 875 u32 txqs)
852{ 876{
853 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) 877 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
854 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); 878 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
855 879
856 return trans->ops->wait_tx_queue_empty(trans, txq_bm); 880 return trans->ops->wait_tx_queue_empty(trans, txqs);
857} 881}
858 882
859static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans, 883static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index a3bfda45d9e6..1ec4d55155f7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -342,7 +342,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
342 { 342 {
343 .range = 12, 343 .range = 12,
344 .lut20 = { 344 .lut20 = {
345 cpu_to_le32(0x00000001), cpu_to_le32(0x00000000), 345 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
346 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 346 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
347 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 347 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
348 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 348 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -363,7 +363,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
363 { 363 {
364 .range = 20, 364 .range = 20,
365 .lut20 = { 365 .lut20 = {
366 cpu_to_le32(0x00000002), cpu_to_le32(0x00000000), 366 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
367 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 367 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
368 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 368 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
369 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 369 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -384,7 +384,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
384 { 384 {
385 .range = 21, 385 .range = 21,
386 .lut20 = { 386 .lut20 = {
387 cpu_to_le32(0x00000003), cpu_to_le32(0x00000000), 387 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
388 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 388 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
389 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 389 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
390 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 390 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -405,7 +405,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
405 { 405 {
406 .range = 23, 406 .range = 23,
407 .lut20 = { 407 .lut20 = {
408 cpu_to_le32(0x00000004), cpu_to_le32(0x00000000), 408 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
409 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 409 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
410 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 410 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
411 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 411 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -426,7 +426,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
426 { 426 {
427 .range = 27, 427 .range = 27,
428 .lut20 = { 428 .lut20 = {
429 cpu_to_le32(0x00000005), cpu_to_le32(0x00000000), 429 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
430 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 430 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
431 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 431 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
432 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 432 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -447,7 +447,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
447 { 447 {
448 .range = 30, 448 .range = 30,
449 .lut20 = { 449 .lut20 = {
450 cpu_to_le32(0x00000006), cpu_to_le32(0x00000000), 450 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
451 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 451 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
452 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 452 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
453 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 453 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -468,7 +468,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
468 { 468 {
469 .range = 32, 469 .range = 32,
470 .lut20 = { 470 .lut20 = {
471 cpu_to_le32(0x00000007), cpu_to_le32(0x00000000), 471 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
472 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 472 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
473 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 473 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
474 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 474 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -489,7 +489,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
489 { 489 {
490 .range = 33, 490 .range = 33,
491 .lut20 = { 491 .lut20 = {
492 cpu_to_le32(0x00000008), cpu_to_le32(0x00000000), 492 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
493 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 493 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
494 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 494 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
495 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 495 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -989,7 +989,7 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
989static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, 989static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
990 struct ieee80211_vif *vif) 990 struct ieee80211_vif *vif)
991{ 991{
992 struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv; 992 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
993 struct iwl_bt_iterator_data *data = _data; 993 struct iwl_bt_iterator_data *data = _data;
994 struct iwl_mvm *mvm = data->mvm; 994 struct iwl_mvm *mvm = data->mvm;
995 995
@@ -1025,7 +1025,7 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
1025void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1025void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1026 enum ieee80211_rssi_event rssi_event) 1026 enum ieee80211_rssi_event rssi_event)
1027{ 1027{
1028 struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv; 1028 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1029 struct iwl_bt_iterator_data data = { 1029 struct iwl_bt_iterator_data data = {
1030 .mvm = mvm, 1030 .mvm = mvm,
1031 }; 1031 };
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index b3210cfbecc8..d530ef3da107 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -330,7 +330,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
330 { 330 {
331 .range = 12, 331 .range = 12,
332 .lut20 = { 332 .lut20 = {
333 cpu_to_le32(0x00000001), cpu_to_le32(0x00000000), 333 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
334 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 334 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
335 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 335 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
336 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 336 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -351,7 +351,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
351 { 351 {
352 .range = 20, 352 .range = 20,
353 .lut20 = { 353 .lut20 = {
354 cpu_to_le32(0x00000002), cpu_to_le32(0x00000000), 354 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
355 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 355 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
356 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 356 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
357 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 357 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -372,7 +372,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
372 { 372 {
373 .range = 21, 373 .range = 21,
374 .lut20 = { 374 .lut20 = {
375 cpu_to_le32(0x00000003), cpu_to_le32(0x00000000), 375 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
376 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 376 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
377 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 377 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
378 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 378 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -393,7 +393,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
393 { 393 {
394 .range = 23, 394 .range = 23,
395 .lut20 = { 395 .lut20 = {
396 cpu_to_le32(0x00000004), cpu_to_le32(0x00000000), 396 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
397 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 397 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
398 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 398 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
399 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 399 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -414,7 +414,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
414 { 414 {
415 .range = 27, 415 .range = 27,
416 .lut20 = { 416 .lut20 = {
417 cpu_to_le32(0x00000005), cpu_to_le32(0x00000000), 417 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
418 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 418 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
419 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 419 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
420 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 420 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -435,7 +435,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
435 { 435 {
436 .range = 30, 436 .range = 30,
437 .lut20 = { 437 .lut20 = {
438 cpu_to_le32(0x00000006), cpu_to_le32(0x00000000), 438 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
439 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 439 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
440 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 440 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
441 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 441 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -456,7 +456,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
456 { 456 {
457 .range = 32, 457 .range = 32,
458 .lut20 = { 458 .lut20 = {
459 cpu_to_le32(0x00000007), cpu_to_le32(0x00000000), 459 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
460 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 460 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
461 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 461 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
462 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 462 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -477,7 +477,7 @@ static const struct corunning_block_luts antenna_coupling_ranges[] = {
477 { 477 {
478 .range = 33, 478 .range = 33,
479 .lut20 = { 479 .lut20 = {
480 cpu_to_le32(0x00000008), cpu_to_le32(0x00000000), 480 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
481 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 481 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
482 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 482 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
483 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), 483 cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
@@ -1034,7 +1034,7 @@ int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
1034static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, 1034static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
1035 struct ieee80211_vif *vif) 1035 struct ieee80211_vif *vif)
1036{ 1036{
1037 struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv; 1037 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1038 struct iwl_bt_iterator_data *data = _data; 1038 struct iwl_bt_iterator_data *data = _data;
1039 struct iwl_mvm *mvm = data->mvm; 1039 struct iwl_mvm *mvm = data->mvm;
1040 1040
@@ -1070,7 +1070,7 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
1070void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1070void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1071 enum ieee80211_rssi_event rssi_event) 1071 enum ieee80211_rssi_event rssi_event)
1072{ 1072{
1073 struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv; 1073 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1074 struct iwl_bt_iterator_data data = { 1074 struct iwl_bt_iterator_data data = {
1075 .mvm = mvm, 1075 .mvm = mvm,
1076 }; 1076 };
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 3bd93476ec1c..beba375489f1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -94,13 +94,42 @@
94#define IWL_MVM_BT_COEX_MPLUT 1 94#define IWL_MVM_BT_COEX_MPLUT 1
95#define IWL_MVM_BT_COEX_RRC 1 95#define IWL_MVM_BT_COEX_RRC 1
96#define IWL_MVM_BT_COEX_TTC 1 96#define IWL_MVM_BT_COEX_TTC 1
97#define IWL_MVM_BT_COEX_MPLUT_REG0 0x28412201 97#define IWL_MVM_BT_COEX_MPLUT_REG0 0x22002200
98#define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451 98#define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451
99#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30 99#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30
100#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0 100#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0
101#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0 101#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0
102#define IWL_MVM_QUOTA_THRESHOLD 8 102#define IWL_MVM_QUOTA_THRESHOLD 4
103#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0 103#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
104#define IWL_MVM_RS_DISABLE_MIMO 0 104#define IWL_MVM_RS_DISABLE_P2P_MIMO 0
105#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
106#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
107#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
108#define IWL_MVM_RS_INITIAL_MIMO_NUM_RATES 3
109#define IWL_MVM_RS_INITIAL_SISO_NUM_RATES 3
110#define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES 2
111#define IWL_MVM_RS_INITIAL_LEGACY_RETRIES 2
112#define IWL_MVM_RS_SECONDARY_LEGACY_RETRIES 1
113#define IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES 16
114#define IWL_MVM_RS_SECONDARY_SISO_NUM_RATES 3
115#define IWL_MVM_RS_SECONDARY_SISO_RETRIES 1
116#define IWL_MVM_RS_RATE_MIN_FAILURE_TH 3
117#define IWL_MVM_RS_RATE_MIN_SUCCESS_TH 8
118#define IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT 5 /* Seconds */
119#define IWL_MVM_RS_IDLE_TIMEOUT 5 /* Seconds */
120#define IWL_MVM_RS_MISSED_RATE_MAX 15
121#define IWL_MVM_RS_LEGACY_FAILURE_LIMIT 160
122#define IWL_MVM_RS_LEGACY_SUCCESS_LIMIT 480
123#define IWL_MVM_RS_LEGACY_TABLE_COUNT 160
124#define IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT 400
125#define IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT 4500
126#define IWL_MVM_RS_NON_LEGACY_TABLE_COUNT 1500
127#define IWL_MVM_RS_SR_FORCE_DECREASE 15 /* percent */
128#define IWL_MVM_RS_SR_NO_DECREASE 85 /* percent */
129#define IWL_MVM_RS_AGG_TIME_LIMIT 4000 /* 4 msecs. valid 100-8000 */
130#define IWL_MVM_RS_AGG_DISABLE_START 3
131#define IWL_MVM_RS_TPC_SR_FORCE_INCREASE 75 /* percent */
132#define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */
133#define IWL_MVM_RS_TPC_TX_POWER_STEP 3
105 134
106#endif /* __MVM_CONSTANTS_H */ 135#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 744de262373e..14e8fd661889 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -793,7 +793,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
793 struct ieee80211_sta *ap_sta) 793 struct ieee80211_sta *ap_sta)
794{ 794{
795 int ret; 795 int ret;
796 struct iwl_mvm_sta *mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv; 796 struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
797 797
798 /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */ 798 /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
799 799
@@ -1137,12 +1137,43 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1137 return ret; 1137 return ret;
1138} 1138}
1139 1139
1140static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
1141{
1142 struct iwl_notification_wait wait_d3;
1143 static const u8 d3_notif[] = { D3_CONFIG_CMD };
1144 int ret;
1145
1146 iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
1147 d3_notif, ARRAY_SIZE(d3_notif),
1148 NULL, NULL);
1149
1150 ret = iwl_mvm_enter_d0i3(mvm->hw->priv);
1151 if (ret)
1152 goto remove_notif;
1153
1154 ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ);
1155 WARN_ON_ONCE(ret);
1156 return ret;
1157
1158remove_notif:
1159 iwl_remove_notification(&mvm->notif_wait, &wait_d3);
1160 return ret;
1161}
1162
1140int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) 1163int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1141{ 1164{
1142 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1165 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1143 1166
1144 iwl_trans_suspend(mvm->trans); 1167 iwl_trans_suspend(mvm->trans);
1145 if (iwl_mvm_is_d0i3_supported(mvm)) { 1168 if (wowlan->any) {
1169 /* 'any' trigger means d0i3 usage */
1170 if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
1171 int ret = iwl_mvm_enter_d0i3_sync(mvm);
1172
1173 if (ret)
1174 return ret;
1175 }
1176
1146 mutex_lock(&mvm->d0i3_suspend_mutex); 1177 mutex_lock(&mvm->d0i3_suspend_mutex);
1147 __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); 1178 __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
1148 mutex_unlock(&mvm->d0i3_suspend_mutex); 1179 mutex_unlock(&mvm->d0i3_suspend_mutex);
@@ -1626,7 +1657,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1626 if (IS_ERR_OR_NULL(ap_sta)) 1657 if (IS_ERR_OR_NULL(ap_sta))
1627 goto out_free; 1658 goto out_free;
1628 1659
1629 mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv; 1660 mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
1630 for (i = 0; i < IWL_MAX_TID_COUNT; i++) { 1661 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
1631 u16 seq = status.qos_seq_ctr[i]; 1662 u16 seq = status.qos_seq_ctr[i];
1632 /* firmware stores last-used value, we store next value */ 1663 /* firmware stores last-used value, we store next value */
@@ -1876,8 +1907,20 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
1876 1907
1877 iwl_trans_resume(mvm->trans); 1908 iwl_trans_resume(mvm->trans);
1878 1909
1879 if (iwl_mvm_is_d0i3_supported(mvm)) 1910 if (mvm->hw->wiphy->wowlan_config->any) {
1911 /* 'any' trigger means d0i3 usage */
1912 if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
1913 int ret = iwl_mvm_exit_d0i3(hw->priv);
1914
1915 if (ret)
1916 return ret;
1917 /*
1918 * d0i3 exit will be deferred until reconfig_complete.
1919 * make sure there we are out of d0i3.
1920 */
1921 }
1880 return 0; 1922 return 0;
1923 }
1881 1924
1882 return __iwl_mvm_resume(mvm, false); 1925 return __iwl_mvm_resume(mvm, false);
1883} 1926}
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 9aa2311a776c..5fe14591e1c4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -268,7 +268,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
268 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id], 268 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
269 lockdep_is_held(&mvm->mutex)); 269 lockdep_is_held(&mvm->mutex));
270 if (!IS_ERR_OR_NULL(sta)) { 270 if (!IS_ERR_OR_NULL(sta)) {
271 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 271 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
272 272
273 pos += scnprintf(buf+pos, bufsz-pos, 273 pos += scnprintf(buf+pos, bufsz-pos,
274 "ap_sta_id %d - reduced Tx power %d\n", 274 "ap_sta_id %d - reduced Tx power %d\n",
@@ -517,6 +517,34 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
517 return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); 517 return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
518} 518}
519 519
520static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file,
521 char __user *user_buf,
522 size_t count, loff_t *ppos)
523{
524 struct ieee80211_vif *vif = file->private_data;
525 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
526 char buf[20];
527 int len;
528
529 len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_bssid);
530 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
531}
532
533static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif,
534 char *buf, size_t count,
535 loff_t *ppos)
536{
537 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
538 struct iwl_mvm *mvm = mvmvif->mvm;
539 bool ret;
540
541 mutex_lock(&mvm->mutex);
542 ret = mac_pton(buf, mvmvif->uapsd_misbehaving_bssid);
543 mutex_unlock(&mvm->mutex);
544
545 return ret ? count : -EINVAL;
546}
547
520#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ 548#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
521 _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) 549 _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
522#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ 550#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -531,6 +559,7 @@ MVM_DEBUGFS_READ_FILE_OPS(mac_params);
531MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32); 559MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
532MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); 560MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
533MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); 561MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
562MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
534 563
535void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 564void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
536{ 565{
@@ -564,6 +593,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
564 MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR); 593 MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
565 MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 594 MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
566 S_IRUSR | S_IWUSR); 595 S_IRUSR | S_IWUSR);
596 MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir,
597 S_IRUSR | S_IWUSR);
567 598
568 if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && 599 if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
569 mvmvif == mvm->bf_allowed_vif) 600 mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 33bf915cd7ea..82c09d86af8c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -654,10 +654,10 @@ out:
654 return ret ?: count; 654 return ret ?: count;
655} 655}
656 656
657#define PRINT_STATS_LE32(_str, _val) \ 657#define PRINT_STATS_LE32(_struct, _memb) \
658 pos += scnprintf(buf + pos, bufsz - pos, \ 658 pos += scnprintf(buf + pos, bufsz - pos, \
659 fmt_table, _str, \ 659 fmt_table, #_memb, \
660 le32_to_cpu(_val)) 660 le32_to_cpu(_struct->_memb))
661 661
662static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file, 662static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
663 char __user *user_buf, size_t count, 663 char __user *user_buf, size_t count,
@@ -692,97 +692,89 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
692 692
693 pos += scnprintf(buf + pos, bufsz - pos, fmt_header, 693 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
694 "Statistics_Rx - OFDM"); 694 "Statistics_Rx - OFDM");
695 PRINT_STATS_LE32("ina_cnt", ofdm->ina_cnt); 695 PRINT_STATS_LE32(ofdm, ina_cnt);
696 PRINT_STATS_LE32("fina_cnt", ofdm->fina_cnt); 696 PRINT_STATS_LE32(ofdm, fina_cnt);
697 PRINT_STATS_LE32("plcp_err", ofdm->plcp_err); 697 PRINT_STATS_LE32(ofdm, plcp_err);
698 PRINT_STATS_LE32("crc32_err", ofdm->crc32_err); 698 PRINT_STATS_LE32(ofdm, crc32_err);
699 PRINT_STATS_LE32("overrun_err", ofdm->overrun_err); 699 PRINT_STATS_LE32(ofdm, overrun_err);
700 PRINT_STATS_LE32("early_overrun_err", ofdm->early_overrun_err); 700 PRINT_STATS_LE32(ofdm, early_overrun_err);
701 PRINT_STATS_LE32("crc32_good", ofdm->crc32_good); 701 PRINT_STATS_LE32(ofdm, crc32_good);
702 PRINT_STATS_LE32("false_alarm_cnt", ofdm->false_alarm_cnt); 702 PRINT_STATS_LE32(ofdm, false_alarm_cnt);
703 PRINT_STATS_LE32("fina_sync_err_cnt", ofdm->fina_sync_err_cnt); 703 PRINT_STATS_LE32(ofdm, fina_sync_err_cnt);
704 PRINT_STATS_LE32("sfd_timeout", ofdm->sfd_timeout); 704 PRINT_STATS_LE32(ofdm, sfd_timeout);
705 PRINT_STATS_LE32("fina_timeout", ofdm->fina_timeout); 705 PRINT_STATS_LE32(ofdm, fina_timeout);
706 PRINT_STATS_LE32("unresponded_rts", ofdm->unresponded_rts); 706 PRINT_STATS_LE32(ofdm, unresponded_rts);
707 PRINT_STATS_LE32("rxe_frame_lmt_overrun", 707 PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
708 ofdm->rxe_frame_limit_overrun); 708 PRINT_STATS_LE32(ofdm, sent_ack_cnt);
709 PRINT_STATS_LE32("sent_ack_cnt", ofdm->sent_ack_cnt); 709 PRINT_STATS_LE32(ofdm, sent_cts_cnt);
710 PRINT_STATS_LE32("sent_cts_cnt", ofdm->sent_cts_cnt); 710 PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
711 PRINT_STATS_LE32("sent_ba_rsp_cnt", ofdm->sent_ba_rsp_cnt); 711 PRINT_STATS_LE32(ofdm, dsp_self_kill);
712 PRINT_STATS_LE32("dsp_self_kill", ofdm->dsp_self_kill); 712 PRINT_STATS_LE32(ofdm, mh_format_err);
713 PRINT_STATS_LE32("mh_format_err", ofdm->mh_format_err); 713 PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum);
714 PRINT_STATS_LE32("re_acq_main_rssi_sum", ofdm->re_acq_main_rssi_sum); 714 PRINT_STATS_LE32(ofdm, reserved);
715 PRINT_STATS_LE32("reserved", ofdm->reserved);
716 715
717 pos += scnprintf(buf + pos, bufsz - pos, fmt_header, 716 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
718 "Statistics_Rx - CCK"); 717 "Statistics_Rx - CCK");
719 PRINT_STATS_LE32("ina_cnt", cck->ina_cnt); 718 PRINT_STATS_LE32(cck, ina_cnt);
720 PRINT_STATS_LE32("fina_cnt", cck->fina_cnt); 719 PRINT_STATS_LE32(cck, fina_cnt);
721 PRINT_STATS_LE32("plcp_err", cck->plcp_err); 720 PRINT_STATS_LE32(cck, plcp_err);
722 PRINT_STATS_LE32("crc32_err", cck->crc32_err); 721 PRINT_STATS_LE32(cck, crc32_err);
723 PRINT_STATS_LE32("overrun_err", cck->overrun_err); 722 PRINT_STATS_LE32(cck, overrun_err);
724 PRINT_STATS_LE32("early_overrun_err", cck->early_overrun_err); 723 PRINT_STATS_LE32(cck, early_overrun_err);
725 PRINT_STATS_LE32("crc32_good", cck->crc32_good); 724 PRINT_STATS_LE32(cck, crc32_good);
726 PRINT_STATS_LE32("false_alarm_cnt", cck->false_alarm_cnt); 725 PRINT_STATS_LE32(cck, false_alarm_cnt);
727 PRINT_STATS_LE32("fina_sync_err_cnt", cck->fina_sync_err_cnt); 726 PRINT_STATS_LE32(cck, fina_sync_err_cnt);
728 PRINT_STATS_LE32("sfd_timeout", cck->sfd_timeout); 727 PRINT_STATS_LE32(cck, sfd_timeout);
729 PRINT_STATS_LE32("fina_timeout", cck->fina_timeout); 728 PRINT_STATS_LE32(cck, fina_timeout);
730 PRINT_STATS_LE32("unresponded_rts", cck->unresponded_rts); 729 PRINT_STATS_LE32(cck, unresponded_rts);
731 PRINT_STATS_LE32("rxe_frame_lmt_overrun", 730 PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
732 cck->rxe_frame_limit_overrun); 731 PRINT_STATS_LE32(cck, sent_ack_cnt);
733 PRINT_STATS_LE32("sent_ack_cnt", cck->sent_ack_cnt); 732 PRINT_STATS_LE32(cck, sent_cts_cnt);
734 PRINT_STATS_LE32("sent_cts_cnt", cck->sent_cts_cnt); 733 PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
735 PRINT_STATS_LE32("sent_ba_rsp_cnt", cck->sent_ba_rsp_cnt); 734 PRINT_STATS_LE32(cck, dsp_self_kill);
736 PRINT_STATS_LE32("dsp_self_kill", cck->dsp_self_kill); 735 PRINT_STATS_LE32(cck, mh_format_err);
737 PRINT_STATS_LE32("mh_format_err", cck->mh_format_err); 736 PRINT_STATS_LE32(cck, re_acq_main_rssi_sum);
738 PRINT_STATS_LE32("re_acq_main_rssi_sum", cck->re_acq_main_rssi_sum); 737 PRINT_STATS_LE32(cck, reserved);
739 PRINT_STATS_LE32("reserved", cck->reserved);
740 738
741 pos += scnprintf(buf + pos, bufsz - pos, fmt_header, 739 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
742 "Statistics_Rx - GENERAL"); 740 "Statistics_Rx - GENERAL");
743 PRINT_STATS_LE32("bogus_cts", general->bogus_cts); 741 PRINT_STATS_LE32(general, bogus_cts);
744 PRINT_STATS_LE32("bogus_ack", general->bogus_ack); 742 PRINT_STATS_LE32(general, bogus_ack);
745 PRINT_STATS_LE32("non_bssid_frames", general->non_bssid_frames); 743 PRINT_STATS_LE32(general, non_bssid_frames);
746 PRINT_STATS_LE32("filtered_frames", general->filtered_frames); 744 PRINT_STATS_LE32(general, filtered_frames);
747 PRINT_STATS_LE32("non_channel_beacons", general->non_channel_beacons); 745 PRINT_STATS_LE32(general, non_channel_beacons);
748 PRINT_STATS_LE32("channel_beacons", general->channel_beacons); 746 PRINT_STATS_LE32(general, channel_beacons);
749 PRINT_STATS_LE32("num_missed_bcon", general->num_missed_bcon); 747 PRINT_STATS_LE32(general, num_missed_bcon);
750 PRINT_STATS_LE32("adc_rx_saturation_time", 748 PRINT_STATS_LE32(general, adc_rx_saturation_time);
751 general->adc_rx_saturation_time); 749 PRINT_STATS_LE32(general, ina_detection_search_time);
752 PRINT_STATS_LE32("ina_detection_search_time", 750 PRINT_STATS_LE32(general, beacon_silence_rssi_a);
753 general->ina_detection_search_time); 751 PRINT_STATS_LE32(general, beacon_silence_rssi_b);
754 PRINT_STATS_LE32("beacon_silence_rssi_a", 752 PRINT_STATS_LE32(general, beacon_silence_rssi_c);
755 general->beacon_silence_rssi_a); 753 PRINT_STATS_LE32(general, interference_data_flag);
756 PRINT_STATS_LE32("beacon_silence_rssi_b", 754 PRINT_STATS_LE32(general, channel_load);
757 general->beacon_silence_rssi_b); 755 PRINT_STATS_LE32(general, dsp_false_alarms);
758 PRINT_STATS_LE32("beacon_silence_rssi_c", 756 PRINT_STATS_LE32(general, beacon_rssi_a);
759 general->beacon_silence_rssi_c); 757 PRINT_STATS_LE32(general, beacon_rssi_b);
760 PRINT_STATS_LE32("interference_data_flag", 758 PRINT_STATS_LE32(general, beacon_rssi_c);
761 general->interference_data_flag); 759 PRINT_STATS_LE32(general, beacon_energy_a);
762 PRINT_STATS_LE32("channel_load", general->channel_load); 760 PRINT_STATS_LE32(general, beacon_energy_b);
763 PRINT_STATS_LE32("dsp_false_alarms", general->dsp_false_alarms); 761 PRINT_STATS_LE32(general, beacon_energy_c);
764 PRINT_STATS_LE32("beacon_rssi_a", general->beacon_rssi_a); 762 PRINT_STATS_LE32(general, num_bt_kills);
765 PRINT_STATS_LE32("beacon_rssi_b", general->beacon_rssi_b); 763 PRINT_STATS_LE32(general, mac_id);
766 PRINT_STATS_LE32("beacon_rssi_c", general->beacon_rssi_c); 764 PRINT_STATS_LE32(general, directed_data_mpdu);
767 PRINT_STATS_LE32("beacon_energy_a", general->beacon_energy_a);
768 PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
769 PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
770 PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
771 PRINT_STATS_LE32("mac_id", general->mac_id);
772 PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
773 765
774 pos += scnprintf(buf + pos, bufsz - pos, fmt_header, 766 pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
775 "Statistics_Rx - HT"); 767 "Statistics_Rx - HT");
776 PRINT_STATS_LE32("plcp_err", ht->plcp_err); 768 PRINT_STATS_LE32(ht, plcp_err);
777 PRINT_STATS_LE32("overrun_err", ht->overrun_err); 769 PRINT_STATS_LE32(ht, overrun_err);
778 PRINT_STATS_LE32("early_overrun_err", ht->early_overrun_err); 770 PRINT_STATS_LE32(ht, early_overrun_err);
779 PRINT_STATS_LE32("crc32_good", ht->crc32_good); 771 PRINT_STATS_LE32(ht, crc32_good);
780 PRINT_STATS_LE32("crc32_err", ht->crc32_err); 772 PRINT_STATS_LE32(ht, crc32_err);
781 PRINT_STATS_LE32("mh_format_err", ht->mh_format_err); 773 PRINT_STATS_LE32(ht, mh_format_err);
782 PRINT_STATS_LE32("agg_crc32_good", ht->agg_crc32_good); 774 PRINT_STATS_LE32(ht, agg_crc32_good);
783 PRINT_STATS_LE32("agg_mpdu_cnt", ht->agg_mpdu_cnt); 775 PRINT_STATS_LE32(ht, agg_mpdu_cnt);
784 PRINT_STATS_LE32("agg_cnt", ht->agg_cnt); 776 PRINT_STATS_LE32(ht, agg_cnt);
785 PRINT_STATS_LE32("unsupport_mcs", ht->unsupport_mcs); 777 PRINT_STATS_LE32(ht, unsupport_mcs);
786 778
787 mutex_unlock(&mvm->mutex); 779 mutex_unlock(&mvm->mutex);
788 780
@@ -933,7 +925,7 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
933 return -EINVAL; 925 return -EINVAL;
934 if (scan_rx_ant > ANT_ABC) 926 if (scan_rx_ant > ANT_ABC)
935 return -EINVAL; 927 return -EINVAL;
936 if (scan_rx_ant & ~mvm->fw->valid_rx_ant) 928 if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm)))
937 return -EINVAL; 929 return -EINVAL;
938 930
939 if (mvm->scan_rx_ant != scan_rx_ant) { 931 if (mvm->scan_rx_ant != scan_rx_ant) {
@@ -945,6 +937,61 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
945 return count; 937 return count;
946} 938}
947 939
940static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
941 char __user *user_buf,
942 size_t count, loff_t *ppos)
943{
944 struct iwl_mvm *mvm = file->private_data;
945 enum iwl_fw_dbg_conf conf;
946 char buf[8];
947 const size_t bufsz = sizeof(buf);
948 int pos = 0;
949
950 mutex_lock(&mvm->mutex);
951 conf = mvm->fw_dbg_conf;
952 mutex_unlock(&mvm->mutex);
953
954 pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf);
955
956 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
957}
958
959static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
960 char *buf, size_t count,
961 loff_t *ppos)
962{
963 int ret, conf_id;
964
965 ret = kstrtoint(buf, 0, &conf_id);
966 if (ret)
967 return ret;
968
969 if (WARN_ON(conf_id >= FW_DBG_MAX))
970 return -EINVAL;
971
972 mutex_lock(&mvm->mutex);
973 ret = iwl_mvm_start_fw_dbg_conf(mvm, conf_id);
974 mutex_unlock(&mvm->mutex);
975
976 return ret ?: count;
977}
978
979static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
980 char *buf, size_t count,
981 loff_t *ppos)
982{
983 int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
984
985 if (ret)
986 return ret;
987
988 iwl_mvm_fw_dbg_collect(mvm);
989
990 iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
991
992 return count;
993}
994
948#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__) 995#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
949#ifdef CONFIG_IWLWIFI_BCAST_FILTERING 996#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
950static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file, 997static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
@@ -1340,6 +1387,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
1340 PRINT_MVM_REF(IWL_MVM_REF_TM_CMD); 1387 PRINT_MVM_REF(IWL_MVM_REF_TM_CMD);
1341 PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK); 1388 PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
1342 PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA); 1389 PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
1390 PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT);
1343 1391
1344 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1392 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1345} 1393}
@@ -1439,6 +1487,26 @@ out:
1439 return count; 1487 return count;
1440} 1488}
1441 1489
1490static ssize_t iwl_dbgfs_enable_scan_iteration_notif_write(struct iwl_mvm *mvm,
1491 char *buf,
1492 size_t count,
1493 loff_t *ppos)
1494{
1495 int val;
1496
1497 mutex_lock(&mvm->mutex);
1498
1499 if (kstrtoint(buf, 10, &val)) {
1500 mutex_unlock(&mvm->mutex);
1501 return -EINVAL;
1502 }
1503
1504 mvm->scan_iter_notif_enabled = val;
1505 mutex_unlock(&mvm->mutex);
1506
1507 return count;
1508}
1509
1442MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); 1510MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
1443 1511
1444/* Device wide debugfs entries */ 1512/* Device wide debugfs entries */
@@ -1459,6 +1527,9 @@ MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
1459MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10); 1527MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
1460MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); 1528MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
1461MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8); 1529MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
1530MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
1531MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 8);
1532MVM_DEBUGFS_WRITE_FILE_OPS(enable_scan_iteration_notif, 8);
1462 1533
1463#ifdef CONFIG_IWLWIFI_BCAST_FILTERING 1534#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1464MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); 1535MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
@@ -1500,6 +1571,10 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
1500 S_IWUSR | S_IRUSR); 1571 S_IWUSR | S_IRUSR);
1501 MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR); 1572 MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
1502 MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR); 1573 MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
1574 MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
1575 MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
1576 MVM_DEBUGFS_ADD_FILE(enable_scan_iteration_notif, mvm->debugfs_dir,
1577 S_IWUSR);
1503 1578
1504#ifdef CONFIG_IWLWIFI_BCAST_FILTERING 1579#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1505 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) { 1580 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 430020047b77..4fc0938b3fb6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -92,14 +92,32 @@ enum iwl_ltr_config_flags {
92}; 92};
93 93
94/** 94/**
95 * struct iwl_ltr_config_cmd_v1 - configures the LTR
96 * @flags: See %enum iwl_ltr_config_flags
97 */
98struct iwl_ltr_config_cmd_v1 {
99 __le32 flags;
100 __le32 static_long;
101 __le32 static_short;
102} __packed; /* LTR_CAPABLE_API_S_VER_1 */
103
104#define LTR_VALID_STATES_NUM 4
105
106/**
95 * struct iwl_ltr_config_cmd - configures the LTR 107 * struct iwl_ltr_config_cmd - configures the LTR
96 * @flags: See %enum iwl_ltr_config_flags 108 * @flags: See %enum iwl_ltr_config_flags
109 * @static_long:
110 * @static_short:
111 * @ltr_cfg_values:
112 * @ltr_short_idle_timeout:
97 */ 113 */
98struct iwl_ltr_config_cmd { 114struct iwl_ltr_config_cmd {
99 __le32 flags; 115 __le32 flags;
100 __le32 static_long; 116 __le32 static_long;
101 __le32 static_short; 117 __le32 static_short;
102} __packed; 118 __le32 ltr_cfg_values[LTR_VALID_STATES_NUM];
119 __le32 ltr_short_idle_timeout;
120} __packed; /* LTR_CAPABLE_API_S_VER_2 */
103 121
104/* Radio LP RX Energy Threshold measured in dBm */ 122/* Radio LP RX Energy Threshold measured in dBm */
105#define POWER_LPRX_RSSI_THRESHOLD 75 123#define POWER_LPRX_RSSI_THRESHOLD 75
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index 8bb5b94bf963..0f1ea80a55ef 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -308,6 +308,42 @@ enum {
308#define LQ_FLAG_DYNAMIC_BW_POS 6 308#define LQ_FLAG_DYNAMIC_BW_POS 6
309#define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS) 309#define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS)
310 310
311/* Single Stream Tx Parameters (lq_cmd->ss_params)
312 * Flags to control a smart FW decision about whether BFER/STBC/SISO will be
313 * used for single stream Tx.
314 */
315
316/* Bit 0-1: Max STBC streams allowed. Can be 0-3.
317 * (0) - No STBC allowed
318 * (1) - 2x1 STBC allowed (HT/VHT)
319 * (2) - 4x2 STBC allowed (HT/VHT)
320 * (3) - 3x2 STBC allowed (HT only)
321 * All our chips are at most 2 antennas so only (1) is valid for now.
322 */
323#define LQ_SS_STBC_ALLOWED_POS 0
324#define LQ_SS_STBC_ALLOWED_MSK (3 << LQ_SS_STBC_ALLOWED_MSK)
325
326/* 2x1 STBC is allowed */
327#define LQ_SS_STBC_1SS_ALLOWED (1 << LQ_SS_STBC_ALLOWED_POS)
328
329/* Bit 2: Beamformer (VHT only) is allowed */
330#define LQ_SS_BFER_ALLOWED_POS 2
331#define LQ_SS_BFER_ALLOWED (1 << LQ_SS_BFER_ALLOWED_POS)
332
333/* Bit 3: Force BFER or STBC for testing
334 * If this is set:
335 * If BFER is allowed then force the ucode to choose BFER else
336 * If STBC is allowed then force the ucode to choose STBC over SISO
337 */
338#define LQ_SS_FORCE_POS 3
339#define LQ_SS_FORCE (1 << LQ_SS_FORCE_POS)
340
341/* Bit 31: ss_params field is valid. Used for FW backward compatibility
342 * with other drivers which don't support the ss_params API yet
343 */
344#define LQ_SS_PARAMS_VALID_POS 31
345#define LQ_SS_PARAMS_VALID (1 << LQ_SS_PARAMS_VALID_POS)
346
311/** 347/**
312 * struct iwl_lq_cmd - link quality command 348 * struct iwl_lq_cmd - link quality command
313 * @sta_id: station to update 349 * @sta_id: station to update
@@ -330,7 +366,7 @@ enum {
330 * 2 - 0x3f: maximal number of frames (up to 3f == 63) 366 * 2 - 0x3f: maximal number of frames (up to 3f == 63)
331 * @rs_table: array of rates for each TX try, each is rate_n_flags, 367 * @rs_table: array of rates for each TX try, each is rate_n_flags,
332 * meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP 368 * meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP
333 * @bf_params: beam forming params, currently not used 369 * @ss_params: single stream features. declare whether STBC or BFER are allowed.
334 */ 370 */
335struct iwl_lq_cmd { 371struct iwl_lq_cmd {
336 u8 sta_id; 372 u8 sta_id;
@@ -348,6 +384,6 @@ struct iwl_lq_cmd {
348 u8 agg_frame_cnt_limit; 384 u8 agg_frame_cnt_limit;
349 __le32 reserved2; 385 __le32 reserved2;
350 __le32 rs_table[LQ_MAX_RETRY_NUM]; 386 __le32 rs_table[LQ_MAX_RETRY_NUM];
351 __le32 bf_params; 387 __le32 ss_params;
352}; /* LINK_QUALITY_CMD_API_S_VER_1 */ 388}; /* LINK_QUALITY_CMD_API_S_VER_1 */
353#endif /* __fw_api_rs_h__ */ 389#endif /* __fw_api_rs_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h
new file mode 100644
index 000000000000..928168b18346
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h
@@ -0,0 +1,277 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66#ifndef __fw_api_stats_h__
67#define __fw_api_stats_h__
68
69struct mvm_statistics_dbg {
70 __le32 burst_check;
71 __le32 burst_count;
72 __le32 wait_for_silence_timeout_cnt;
73 __le32 reserved[3];
74} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
75
76struct mvm_statistics_div {
77 __le32 tx_on_a;
78 __le32 tx_on_b;
79 __le32 exec_time;
80 __le32 probe_time;
81 __le32 rssi_ant;
82 __le32 reserved2;
83} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */
84
85struct mvm_statistics_rx_non_phy {
86 __le32 bogus_cts; /* CTS received when not expecting CTS */
87 __le32 bogus_ack; /* ACK received when not expecting ACK */
88 __le32 non_bssid_frames; /* number of frames with BSSID that
89 * doesn't belong to the STA BSSID */
90 __le32 filtered_frames; /* count frames that were dumped in the
91 * filtering process */
92 __le32 non_channel_beacons; /* beacons with our bss id but not on
93 * our serving channel */
94 __le32 channel_beacons; /* beacons with our bss id and in our
95 * serving channel */
96 __le32 num_missed_bcon; /* number of missed beacons */
97 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
98 * ADC was in saturation */
99 __le32 ina_detection_search_time;/* total time (in 0.8us) searched
100 * for INA */
101 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
102 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
103 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
104 __le32 interference_data_flag; /* flag for interference data
105 * availability. 1 when data is
106 * available. */
107 __le32 channel_load; /* counts RX Enable time in uSec */
108 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
109 * and CCK) counter */
110 __le32 beacon_rssi_a;
111 __le32 beacon_rssi_b;
112 __le32 beacon_rssi_c;
113 __le32 beacon_energy_a;
114 __le32 beacon_energy_b;
115 __le32 beacon_energy_c;
116 __le32 num_bt_kills;
117 __le32 mac_id;
118 __le32 directed_data_mpdu;
119} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */
120
121struct mvm_statistics_rx_phy {
122 __le32 ina_cnt;
123 __le32 fina_cnt;
124 __le32 plcp_err;
125 __le32 crc32_err;
126 __le32 overrun_err;
127 __le32 early_overrun_err;
128 __le32 crc32_good;
129 __le32 false_alarm_cnt;
130 __le32 fina_sync_err_cnt;
131 __le32 sfd_timeout;
132 __le32 fina_timeout;
133 __le32 unresponded_rts;
134 __le32 rxe_frame_lmt_overrun;
135 __le32 sent_ack_cnt;
136 __le32 sent_cts_cnt;
137 __le32 sent_ba_rsp_cnt;
138 __le32 dsp_self_kill;
139 __le32 mh_format_err;
140 __le32 re_acq_main_rssi_sum;
141 __le32 reserved;
142} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */
143
144struct mvm_statistics_rx_ht_phy {
145 __le32 plcp_err;
146 __le32 overrun_err;
147 __le32 early_overrun_err;
148 __le32 crc32_good;
149 __le32 crc32_err;
150 __le32 mh_format_err;
151 __le32 agg_crc32_good;
152 __le32 agg_mpdu_cnt;
153 __le32 agg_cnt;
154 __le32 unsupport_mcs;
155} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */
156
157struct mvm_statistics_tx_non_phy {
158 __le32 preamble_cnt;
159 __le32 rx_detected_cnt;
160 __le32 bt_prio_defer_cnt;
161 __le32 bt_prio_kill_cnt;
162 __le32 few_bytes_cnt;
163 __le32 cts_timeout;
164 __le32 ack_timeout;
165 __le32 expected_ack_cnt;
166 __le32 actual_ack_cnt;
167 __le32 dump_msdu_cnt;
168 __le32 burst_abort_next_frame_mismatch_cnt;
169 __le32 burst_abort_missing_next_frame_cnt;
170 __le32 cts_timeout_collision;
171 __le32 ack_or_ba_timeout_collision;
172} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_3 */
173
174#define MAX_CHAINS 3
175
176struct mvm_statistics_tx_non_phy_agg {
177 __le32 ba_timeout;
178 __le32 ba_reschedule_frames;
179 __le32 scd_query_agg_frame_cnt;
180 __le32 scd_query_no_agg;
181 __le32 scd_query_agg;
182 __le32 scd_query_mismatch;
183 __le32 frame_not_ready;
184 __le32 underrun;
185 __le32 bt_prio_kill;
186 __le32 rx_ba_rsp_cnt;
187 __s8 txpower[MAX_CHAINS];
188 __s8 reserved;
189 __le32 reserved2;
190} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
191
192struct mvm_statistics_tx_channel_width {
193 __le32 ext_cca_narrow_ch20[1];
194 __le32 ext_cca_narrow_ch40[2];
195 __le32 ext_cca_narrow_ch80[3];
196 __le32 ext_cca_narrow_ch160[4];
197 __le32 last_tx_ch_width_indx;
198 __le32 rx_detected_per_ch_width[4];
199 __le32 success_per_ch_width[4];
200 __le32 fail_per_ch_width[4];
201}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
202
203struct mvm_statistics_tx {
204 struct mvm_statistics_tx_non_phy general;
205 struct mvm_statistics_tx_non_phy_agg agg;
206 struct mvm_statistics_tx_channel_width channel_width;
207} __packed; /* STATISTICS_TX_API_S_VER_4 */
208
209
210struct mvm_statistics_bt_activity {
211 __le32 hi_priority_tx_req_cnt;
212 __le32 hi_priority_tx_denied_cnt;
213 __le32 lo_priority_tx_req_cnt;
214 __le32 lo_priority_tx_denied_cnt;
215 __le32 hi_priority_rx_req_cnt;
216 __le32 hi_priority_rx_denied_cnt;
217 __le32 lo_priority_rx_req_cnt;
218 __le32 lo_priority_rx_denied_cnt;
219} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
220
221struct mvm_statistics_general {
222 __le32 radio_temperature;
223 __le32 radio_voltage;
224 struct mvm_statistics_dbg dbg;
225 __le32 sleep_time;
226 __le32 slots_out;
227 __le32 slots_idle;
228 __le32 ttl_timestamp;
229 struct mvm_statistics_div slow_div;
230 __le32 rx_enable_counter;
231 /*
232 * num_of_sos_states:
233 * count the number of times we have to re-tune
234 * in order to get out of bad PHY status
235 */
236 __le32 num_of_sos_states;
237 __le32 beacon_filtered;
238 __le32 missed_beacons;
239 __s8 beacon_filter_average_energy;
240 __s8 beacon_filter_reason;
241 __s8 beacon_filter_current_energy;
242 __s8 beacon_filter_reserved;
243 __le32 beacon_filter_delta_time;
244 struct mvm_statistics_bt_activity bt_activity;
245} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
246
247struct mvm_statistics_rx {
248 struct mvm_statistics_rx_phy ofdm;
249 struct mvm_statistics_rx_phy cck;
250 struct mvm_statistics_rx_non_phy general;
251 struct mvm_statistics_rx_ht_phy ofdm_ht;
252} __packed; /* STATISTICS_RX_API_S_VER_3 */
253
254/*
255 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
256 *
257 * By default, uCode issues this notification after receiving a beacon
258 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
259 * REPLY_STATISTICS_CMD 0x9c, above.
260 *
261 * Statistics counters continue to increment beacon after beacon, but are
262 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
263 * 0x9c with CLEAR_STATS bit set (see above).
264 *
265 * uCode also issues this notification during scans. uCode clears statistics
266 * appropriately so that each notification contains statistics for only the
267 * one channel that has just been scanned.
268 */
269
270struct iwl_notif_statistics {
271 __le32 flag;
272 struct mvm_statistics_rx rx;
273 struct mvm_statistics_tx tx;
274 struct mvm_statistics_general general;
275} __packed; /* STATISTICS_NTFY_API_S_VER_8 */
276
277#endif /* __fw_api_stats_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 5bca1f8bfebf..81c4ea3c6958 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -592,4 +592,43 @@ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
592 tx_resp->frame_count) & 0xfff; 592 tx_resp->frame_count) & 0xfff;
593} 593}
594 594
595/**
596 * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
597 * @token: dialog token addba - unused legacy
598 * @sta_id: station id
599 * @tid: TID 0..7
600 * @scd_queue: scheduler queue to config
601 * @enable: 1 queue enable, 0 queue disable
602 * @aggregate: 1 aggregated queue, 0 otherwise
603 * @tx_fifo: %enum iwl_mvm_tx_fifo
604 * @window: BA window size
605 * @ssn: SSN for the BA agreement
606 */
607struct iwl_scd_txq_cfg_cmd {
608 u8 token;
609 u8 sta_id;
610 u8 tid;
611 u8 scd_queue;
612 u8 enable;
613 u8 aggregate;
614 u8 tx_fifo;
615 u8 window;
616 __le16 ssn;
617 __le16 reserved;
618} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */
619
620/**
621 * struct iwl_scd_txq_cfg_rsp
622 * @token: taken from the command
623 * @sta_id: station id from the command
624 * @tid: tid from the command
625 * @scd_queue: scd_queue from the command
626 */
627struct iwl_scd_txq_cfg_rsp {
628 u8 token;
629 u8 sta_id;
630 u8 tid;
631 u8 scd_queue;
632} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
633
595#endif /* __fw_api_tx_h__ */ 634#endif /* __fw_api_tx_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 88af6dd2ceaa..b56154fe8ec5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -74,6 +74,7 @@
74#include "fw-api-d3.h" 74#include "fw-api-d3.h"
75#include "fw-api-coex.h" 75#include "fw-api-coex.h"
76#include "fw-api-scan.h" 76#include "fw-api-scan.h"
77#include "fw-api-stats.h"
77 78
78/* Tx queue numbers */ 79/* Tx queue numbers */
79enum { 80enum {
@@ -128,6 +129,9 @@ enum {
128 /* global key */ 129 /* global key */
129 WEP_KEY = 0x20, 130 WEP_KEY = 0x20,
130 131
132 /* Memory */
133 SHARED_MEM_CFG = 0x25,
134
131 /* TDLS */ 135 /* TDLS */
132 TDLS_CHANNEL_SWITCH_CMD = 0x27, 136 TDLS_CHANNEL_SWITCH_CMD = 0x27,
133 TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa, 137 TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
@@ -1381,214 +1385,6 @@ struct iwl_mvm_marker {
1381 __le32 metadata[0]; 1385 __le32 metadata[0];
1382} __packed; /* MARKER_API_S_VER_1 */ 1386} __packed; /* MARKER_API_S_VER_1 */
1383 1387
1384struct mvm_statistics_dbg {
1385 __le32 burst_check;
1386 __le32 burst_count;
1387 __le32 wait_for_silence_timeout_cnt;
1388 __le32 reserved[3];
1389} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
1390
1391struct mvm_statistics_div {
1392 __le32 tx_on_a;
1393 __le32 tx_on_b;
1394 __le32 exec_time;
1395 __le32 probe_time;
1396 __le32 rssi_ant;
1397 __le32 reserved2;
1398} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */
1399
1400struct mvm_statistics_general_common {
1401 __le32 temperature; /* radio temperature */
1402 __le32 temperature_m; /* radio voltage */
1403 struct mvm_statistics_dbg dbg;
1404 __le32 sleep_time;
1405 __le32 slots_out;
1406 __le32 slots_idle;
1407 __le32 ttl_timestamp;
1408 struct mvm_statistics_div div;
1409 __le32 rx_enable_counter;
1410 /*
1411 * num_of_sos_states:
1412 * count the number of times we have to re-tune
1413 * in order to get out of bad PHY status
1414 */
1415 __le32 num_of_sos_states;
1416} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
1417
1418struct mvm_statistics_rx_non_phy {
1419 __le32 bogus_cts; /* CTS received when not expecting CTS */
1420 __le32 bogus_ack; /* ACK received when not expecting ACK */
1421 __le32 non_bssid_frames; /* number of frames with BSSID that
1422 * doesn't belong to the STA BSSID */
1423 __le32 filtered_frames; /* count frames that were dumped in the
1424 * filtering process */
1425 __le32 non_channel_beacons; /* beacons with our bss id but not on
1426 * our serving channel */
1427 __le32 channel_beacons; /* beacons with our bss id and in our
1428 * serving channel */
1429 __le32 num_missed_bcon; /* number of missed beacons */
1430 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
1431 * ADC was in saturation */
1432 __le32 ina_detection_search_time;/* total time (in 0.8us) searched
1433 * for INA */
1434 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
1435 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
1436 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
1437 __le32 interference_data_flag; /* flag for interference data
1438 * availability. 1 when data is
1439 * available. */
1440 __le32 channel_load; /* counts RX Enable time in uSec */
1441 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
1442 * and CCK) counter */
1443 __le32 beacon_rssi_a;
1444 __le32 beacon_rssi_b;
1445 __le32 beacon_rssi_c;
1446 __le32 beacon_energy_a;
1447 __le32 beacon_energy_b;
1448 __le32 beacon_energy_c;
1449 __le32 num_bt_kills;
1450 __le32 mac_id;
1451 __le32 directed_data_mpdu;
1452} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */
1453
1454struct mvm_statistics_rx_phy {
1455 __le32 ina_cnt;
1456 __le32 fina_cnt;
1457 __le32 plcp_err;
1458 __le32 crc32_err;
1459 __le32 overrun_err;
1460 __le32 early_overrun_err;
1461 __le32 crc32_good;
1462 __le32 false_alarm_cnt;
1463 __le32 fina_sync_err_cnt;
1464 __le32 sfd_timeout;
1465 __le32 fina_timeout;
1466 __le32 unresponded_rts;
1467 __le32 rxe_frame_limit_overrun;
1468 __le32 sent_ack_cnt;
1469 __le32 sent_cts_cnt;
1470 __le32 sent_ba_rsp_cnt;
1471 __le32 dsp_self_kill;
1472 __le32 mh_format_err;
1473 __le32 re_acq_main_rssi_sum;
1474 __le32 reserved;
1475} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */
1476
1477struct mvm_statistics_rx_ht_phy {
1478 __le32 plcp_err;
1479 __le32 overrun_err;
1480 __le32 early_overrun_err;
1481 __le32 crc32_good;
1482 __le32 crc32_err;
1483 __le32 mh_format_err;
1484 __le32 agg_crc32_good;
1485 __le32 agg_mpdu_cnt;
1486 __le32 agg_cnt;
1487 __le32 unsupport_mcs;
1488} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */
1489
1490#define MAX_CHAINS 3
1491
1492struct mvm_statistics_tx_non_phy_agg {
1493 __le32 ba_timeout;
1494 __le32 ba_reschedule_frames;
1495 __le32 scd_query_agg_frame_cnt;
1496 __le32 scd_query_no_agg;
1497 __le32 scd_query_agg;
1498 __le32 scd_query_mismatch;
1499 __le32 frame_not_ready;
1500 __le32 underrun;
1501 __le32 bt_prio_kill;
1502 __le32 rx_ba_rsp_cnt;
1503 __s8 txpower[MAX_CHAINS];
1504 __s8 reserved;
1505 __le32 reserved2;
1506} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
1507
1508struct mvm_statistics_tx_channel_width {
1509 __le32 ext_cca_narrow_ch20[1];
1510 __le32 ext_cca_narrow_ch40[2];
1511 __le32 ext_cca_narrow_ch80[3];
1512 __le32 ext_cca_narrow_ch160[4];
1513 __le32 last_tx_ch_width_indx;
1514 __le32 rx_detected_per_ch_width[4];
1515 __le32 success_per_ch_width[4];
1516 __le32 fail_per_ch_width[4];
1517}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
1518
1519struct mvm_statistics_tx {
1520 __le32 preamble_cnt;
1521 __le32 rx_detected_cnt;
1522 __le32 bt_prio_defer_cnt;
1523 __le32 bt_prio_kill_cnt;
1524 __le32 few_bytes_cnt;
1525 __le32 cts_timeout;
1526 __le32 ack_timeout;
1527 __le32 expected_ack_cnt;
1528 __le32 actual_ack_cnt;
1529 __le32 dump_msdu_cnt;
1530 __le32 burst_abort_next_frame_mismatch_cnt;
1531 __le32 burst_abort_missing_next_frame_cnt;
1532 __le32 cts_timeout_collision;
1533 __le32 ack_or_ba_timeout_collision;
1534 struct mvm_statistics_tx_non_phy_agg agg;
1535 struct mvm_statistics_tx_channel_width channel_width;
1536} __packed; /* STATISTICS_TX_API_S_VER_4 */
1537
1538
1539struct mvm_statistics_bt_activity {
1540 __le32 hi_priority_tx_req_cnt;
1541 __le32 hi_priority_tx_denied_cnt;
1542 __le32 lo_priority_tx_req_cnt;
1543 __le32 lo_priority_tx_denied_cnt;
1544 __le32 hi_priority_rx_req_cnt;
1545 __le32 hi_priority_rx_denied_cnt;
1546 __le32 lo_priority_rx_req_cnt;
1547 __le32 lo_priority_rx_denied_cnt;
1548} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
1549
1550struct mvm_statistics_general {
1551 struct mvm_statistics_general_common common;
1552 __le32 beacon_filtered;
1553 __le32 missed_beacons;
1554 __s8 beacon_filter_average_energy;
1555 __s8 beacon_filter_reason;
1556 __s8 beacon_filter_current_energy;
1557 __s8 beacon_filter_reserved;
1558 __le32 beacon_filter_delta_time;
1559 struct mvm_statistics_bt_activity bt_activity;
1560} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
1561
1562struct mvm_statistics_rx {
1563 struct mvm_statistics_rx_phy ofdm;
1564 struct mvm_statistics_rx_phy cck;
1565 struct mvm_statistics_rx_non_phy general;
1566 struct mvm_statistics_rx_ht_phy ofdm_ht;
1567} __packed; /* STATISTICS_RX_API_S_VER_3 */
1568
1569/*
1570 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
1571 *
1572 * By default, uCode issues this notification after receiving a beacon
1573 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
1574 * REPLY_STATISTICS_CMD 0x9c, above.
1575 *
1576 * Statistics counters continue to increment beacon after beacon, but are
1577 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
1578 * 0x9c with CLEAR_STATS bit set (see above).
1579 *
1580 * uCode also issues this notification during scans. uCode clears statistics
1581 * appropriately so that each notification contains statistics for only the
1582 * one channel that has just been scanned.
1583 */
1584
1585struct iwl_notif_statistics { /* STATISTICS_NTFY_API_S_VER_8 */
1586 __le32 flag;
1587 struct mvm_statistics_rx rx;
1588 struct mvm_statistics_tx tx;
1589 struct mvm_statistics_general general;
1590} __packed;
1591
1592/*********************************** 1388/***********************************
1593 * Smart Fifo API 1389 * Smart Fifo API
1594 ***********************************/ 1390 ***********************************/
@@ -1680,63 +1476,6 @@ struct iwl_dts_measurement_notif {
1680 __le32 voltage; 1476 __le32 voltage;
1681} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */ 1477} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */
1682 1478
1683/**
1684 * enum iwl_scd_control - scheduler config command control flags
1685 * @IWL_SCD_CONTROL_RM_TID: remove TID from this queue
1686 * @IWL_SCD_CONTROL_SET_SSN: use the SSN and program it into HW
1687 */
1688enum iwl_scd_control {
1689 IWL_SCD_CONTROL_RM_TID = BIT(4),
1690 IWL_SCD_CONTROL_SET_SSN = BIT(5),
1691};
1692
1693/**
1694 * enum iwl_scd_flags - scheduler config command flags
1695 * @IWL_SCD_FLAGS_SHARE_TID: multiple TIDs map to this queue
1696 * @IWL_SCD_FLAGS_SHARE_RA: multiple RAs map to this queue
1697 * @IWL_SCD_FLAGS_DQA_ENABLED: DQA is enabled
1698 */
1699enum iwl_scd_flags {
1700 IWL_SCD_FLAGS_SHARE_TID = BIT(0),
1701 IWL_SCD_FLAGS_SHARE_RA = BIT(1),
1702 IWL_SCD_FLAGS_DQA_ENABLED = BIT(2),
1703};
1704
1705#define IWL_SCDQ_INVALID_STA 0xff
1706
1707/**
1708 * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
1709 * @token: dialog token addba - unused legacy
1710 * @sta_id: station id 4-bit
1711 * @tid: TID 0..7
1712 * @scd_queue: TFD queue num 0 .. 31
1713 * @enable: 1 queue enable, 0 queue disable
1714 * @aggregate: 1 aggregated queue, 0 otherwise
1715 * @tx_fifo: tx fifo num 0..7
1716 * @window: up to 64
1717 * @ssn: starting seq num 12-bit
1718 * @control: command control flags
1719 * @flags: flags - see &enum iwl_scd_flags
1720 *
1721 * Note that every time the command is sent, all parameters must
1722 * be filled with the exception of
1723 * - the SSN, which is only used with @IWL_SCD_CONTROL_SET_SSN
1724 * - the window, which is only relevant when starting aggregation
1725 */
1726struct iwl_scd_txq_cfg_cmd {
1727 u8 token;
1728 u8 sta_id;
1729 u8 tid;
1730 u8 scd_queue;
1731 u8 enable;
1732 u8 aggregate;
1733 u8 tx_fifo;
1734 u8 window;
1735 __le16 ssn;
1736 u8 control;
1737 u8 flags;
1738} __packed;
1739
1740/*********************************** 1479/***********************************
1741 * TDLS API 1480 * TDLS API
1742 ***********************************/ 1481 ***********************************/
@@ -1878,4 +1617,36 @@ struct iwl_tdls_config_res {
1878 struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; 1617 struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
1879} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ 1618} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
1880 1619
1620#define TX_FIFO_MAX_NUM 8
1621#define RX_FIFO_MAX_NUM 2
1622
1623/**
1624 * Shared memory configuration information from the FW
1625 *
1626 * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not
1627 * accessible)
1628 * @shared_mem_size: shared memory size
1629 * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to
1630 * 0x0 as accessible only via DBGM RDAT)
1631 * @sample_buff_size: internal sample buff size
1632 * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre
1633 * 8000 HW set to 0x0 as not accessible)
1634 * @txfifo_size: size of TXF0 ... TXF7
1635 * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0
1636 * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
1637 * when paging is not supported this should be 0
1638 * @page_buff_size: size of %page_buff_addr
1639 */
1640struct iwl_shared_mem_cfg {
1641 __le32 shared_mem_addr;
1642 __le32 shared_mem_size;
1643 __le32 sample_buff_addr;
1644 __le32 sample_buff_size;
1645 __le32 txfifo_addr;
1646 __le32 txfifo_size[TX_FIFO_MAX_NUM];
1647 __le32 rxfifo_size[RX_FIFO_MAX_NUM];
1648 __le32 page_buff_addr;
1649 __le32 page_buff_size;
1650} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
1651
1881#endif /* __fw_api_h__ */ 1652#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index d0fa6e9ed590..ca38e9817374 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -70,6 +70,7 @@
70#include "iwl-debug.h" 70#include "iwl-debug.h"
71#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */ 71#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
72#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */ 72#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
73#include "iwl-prph.h"
73#include "iwl-eeprom-parse.h" 74#include "iwl-eeprom-parse.h"
74 75
75#include "mvm.h" 76#include "mvm.h"
@@ -269,7 +270,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
269 enum iwl_ucode_type ucode_type = mvm->cur_ucode; 270 enum iwl_ucode_type ucode_type = mvm->cur_ucode;
270 271
271 /* Set parameters */ 272 /* Set parameters */
272 phy_cfg_cmd.phy_cfg = cpu_to_le32(mvm->fw->phy_config); 273 phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
273 phy_cfg_cmd.calib_control.event_trigger = 274 phy_cfg_cmd.calib_control.event_trigger =
274 mvm->fw->default_calib[ucode_type].event_trigger; 275 mvm->fw->default_calib[ucode_type].event_trigger;
275 phy_cfg_cmd.calib_control.flow_trigger = 276 phy_cfg_cmd.calib_control.flow_trigger =
@@ -346,7 +347,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
346 mvm->calibrating = true; 347 mvm->calibrating = true;
347 348
348 /* Send TX valid antennas before triggering calibrations */ 349 /* Send TX valid antennas before triggering calibrations */
349 ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant); 350 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
350 if (ret) 351 if (ret)
351 goto error; 352 goto error;
352 353
@@ -399,8 +400,71 @@ out:
399 return ret; 400 return ret;
400} 401}
401 402
402static int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, 403static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
403 enum iwl_fw_dbg_conf conf_id) 404{
405 struct iwl_host_cmd cmd = {
406 .id = SHARED_MEM_CFG,
407 .flags = CMD_WANT_SKB,
408 .data = { NULL, },
409 .len = { 0, },
410 };
411 struct iwl_rx_packet *pkt;
412 struct iwl_shared_mem_cfg *mem_cfg;
413 u32 i;
414
415 lockdep_assert_held(&mvm->mutex);
416
417 if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
418 return;
419
420 pkt = cmd.resp_pkt;
421 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
422 IWL_ERR(mvm, "Bad return from SHARED_MEM_CFG (0x%08X)\n",
423 pkt->hdr.flags);
424 goto exit;
425 }
426
427 mem_cfg = (void *)pkt->data;
428
429 mvm->shared_mem_cfg.shared_mem_addr =
430 le32_to_cpu(mem_cfg->shared_mem_addr);
431 mvm->shared_mem_cfg.shared_mem_size =
432 le32_to_cpu(mem_cfg->shared_mem_size);
433 mvm->shared_mem_cfg.sample_buff_addr =
434 le32_to_cpu(mem_cfg->sample_buff_addr);
435 mvm->shared_mem_cfg.sample_buff_size =
436 le32_to_cpu(mem_cfg->sample_buff_size);
437 mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
438 for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
439 mvm->shared_mem_cfg.txfifo_size[i] =
440 le32_to_cpu(mem_cfg->txfifo_size[i]);
441 for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
442 mvm->shared_mem_cfg.rxfifo_size[i] =
443 le32_to_cpu(mem_cfg->rxfifo_size[i]);
444 mvm->shared_mem_cfg.page_buff_addr =
445 le32_to_cpu(mem_cfg->page_buff_addr);
446 mvm->shared_mem_cfg.page_buff_size =
447 le32_to_cpu(mem_cfg->page_buff_size);
448 IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
449
450exit:
451 iwl_free_resp(&cmd);
452}
453
454void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm)
455{
456 /* stop recording */
457 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
458 iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
459 } else {
460 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
461 iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
462 }
463
464 schedule_work(&mvm->fw_error_dump_wk);
465}
466
467int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf conf_id)
404{ 468{
405 u8 *ptr; 469 u8 *ptr;
406 int ret; 470 int ret;
@@ -435,6 +499,35 @@ static int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm,
435 return ret; 499 return ret;
436} 500}
437 501
502static int iwl_mvm_config_ltr_v1(struct iwl_mvm *mvm)
503{
504 struct iwl_ltr_config_cmd_v1 cmd_v1 = {
505 .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
506 };
507
508 if (!mvm->trans->ltr_enabled)
509 return 0;
510
511 return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
512 sizeof(cmd_v1), &cmd_v1);
513}
514
515static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
516{
517 struct iwl_ltr_config_cmd cmd = {
518 .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
519 };
520
521 if (!mvm->trans->ltr_enabled)
522 return 0;
523
524 if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_HDC_PHASE_0))
525 return iwl_mvm_config_ltr_v1(mvm);
526
527 return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
528 sizeof(cmd), &cmd);
529}
530
438int iwl_mvm_up(struct iwl_mvm *mvm) 531int iwl_mvm_up(struct iwl_mvm *mvm)
439{ 532{
440 int ret, i; 533 int ret, i;
@@ -482,6 +575,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
482 goto error; 575 goto error;
483 } 576 }
484 577
578 if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10)
579 iwl_mvm_get_shared_mem_conf(mvm);
580
485 ret = iwl_mvm_sf_update(mvm, NULL, false); 581 ret = iwl_mvm_sf_update(mvm, NULL, false);
486 if (ret) 582 if (ret)
487 IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); 583 IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
@@ -489,7 +585,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
489 mvm->fw_dbg_conf = FW_DBG_INVALID; 585 mvm->fw_dbg_conf = FW_DBG_INVALID;
490 iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_CUSTOM); 586 iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_CUSTOM);
491 587
492 ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant); 588 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
493 if (ret) 589 if (ret)
494 goto error; 590 goto error;
495 591
@@ -538,14 +634,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
538 /* Initialize tx backoffs to the minimal possible */ 634 /* Initialize tx backoffs to the minimal possible */
539 iwl_mvm_tt_tx_backoff(mvm, 0); 635 iwl_mvm_tt_tx_backoff(mvm, 0);
540 636
541 if (mvm->trans->ltr_enabled) { 637 WARN_ON(iwl_mvm_config_ltr(mvm));
542 struct iwl_ltr_config_cmd cmd = {
543 .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
544 };
545
546 WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
547 sizeof(cmd), &cmd));
548 }
549 638
550 ret = iwl_mvm_power_update_device(mvm); 639 ret = iwl_mvm_power_update_device(mvm);
551 if (ret) 640 if (ret)
@@ -584,7 +673,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
584 goto error; 673 goto error;
585 } 674 }
586 675
587 ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant); 676 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
588 if (ret) 677 if (ret)
589 goto error; 678 goto error;
590 679
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index f6d86ccce6a8..7bdc6220743f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -208,8 +208,10 @@ u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
208 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) 208 if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
209 return BIT(IWL_MVM_OFFCHANNEL_QUEUE); 209 return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
210 210
211 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 211 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
212 qmask |= BIT(vif->hw_queue[ac]); 212 if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
213 qmask |= BIT(vif->hw_queue[ac]);
214 }
213 215
214 if (vif->type == NL80211_IFTYPE_AP) 216 if (vif->type == NL80211_IFTYPE_AP)
215 qmask |= BIT(vif->cab_queue); 217 qmask |= BIT(vif->cab_queue);
@@ -460,6 +462,9 @@ exit_fail:
460 462
461int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 463int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
462{ 464{
465 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
466 mvm->cfg->base_params->wd_timeout :
467 IWL_WATCHDOG_DISABLED;
463 u32 ac; 468 u32 ac;
464 int ret; 469 int ret;
465 470
@@ -472,16 +477,17 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
472 switch (vif->type) { 477 switch (vif->type) {
473 case NL80211_IFTYPE_P2P_DEVICE: 478 case NL80211_IFTYPE_P2P_DEVICE:
474 iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, 479 iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
475 IWL_MVM_TX_FIFO_VO); 480 IWL_MVM_TX_FIFO_VO, wdg_timeout);
476 break; 481 break;
477 case NL80211_IFTYPE_AP: 482 case NL80211_IFTYPE_AP:
478 iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, 483 iwl_mvm_enable_ac_txq(mvm, vif->cab_queue,
479 IWL_MVM_TX_FIFO_MCAST); 484 IWL_MVM_TX_FIFO_MCAST, wdg_timeout);
480 /* fall through */ 485 /* fall through */
481 default: 486 default:
482 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 487 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
483 iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac], 488 iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
484 iwl_mvm_ac_to_tx_fifo[ac]); 489 iwl_mvm_ac_to_tx_fifo[ac],
490 wdg_timeout);
485 break; 491 break;
486 } 492 }
487 493
@@ -496,14 +502,14 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
496 502
497 switch (vif->type) { 503 switch (vif->type) {
498 case NL80211_IFTYPE_P2P_DEVICE: 504 case NL80211_IFTYPE_P2P_DEVICE:
499 iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE); 505 iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, 0);
500 break; 506 break;
501 case NL80211_IFTYPE_AP: 507 case NL80211_IFTYPE_AP:
502 iwl_mvm_disable_txq(mvm, vif->cab_queue); 508 iwl_mvm_disable_txq(mvm, vif->cab_queue, 0);
503 /* fall through */ 509 /* fall through */
504 default: 510 default:
505 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 511 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
506 iwl_mvm_disable_txq(mvm, vif->hw_queue[ac]); 512 iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], 0);
507 } 513 }
508} 514}
509 515
@@ -975,7 +981,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
975 beacon_cmd.tx.tx_flags = cpu_to_le32(tx_flags); 981 beacon_cmd.tx.tx_flags = cpu_to_le32(tx_flags);
976 982
977 mvm->mgmt_last_antenna_idx = 983 mvm->mgmt_last_antenna_idx =
978 iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant, 984 iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
979 mvm->mgmt_last_antenna_idx); 985 mvm->mgmt_last_antenna_idx);
980 986
981 beacon_cmd.tx.rate_n_flags = 987 beacon_cmd.tx.rate_n_flags =
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 20915587c820..1ff7ec08532d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -85,6 +85,7 @@
85#include "testmode.h" 85#include "testmode.h"
86#include "iwl-fw-error-dump.h" 86#include "iwl-fw-error-dump.h"
87#include "iwl-prph.h" 87#include "iwl-prph.h"
88#include "iwl-csr.h"
88 89
89static const struct ieee80211_iface_limit iwl_mvm_limits[] = { 90static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
90 { 91 {
@@ -105,7 +106,7 @@ static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
105 106
106static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { 107static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
107 { 108 {
108 .num_different_channels = 1, 109 .num_different_channels = 2,
109 .max_interfaces = 3, 110 .max_interfaces = 3,
110 .limits = iwl_mvm_limits, 111 .limits = iwl_mvm_limits,
111 .n_limits = ARRAY_SIZE(iwl_mvm_limits), 112 .n_limits = ARRAY_SIZE(iwl_mvm_limits),
@@ -326,6 +327,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
326 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | 327 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
327 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; 328 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
328 hw->rate_control_algorithm = "iwl-mvm-rs"; 329 hw->rate_control_algorithm = "iwl-mvm-rs";
330 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
331 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
329 332
330 /* 333 /*
331 * Enable 11w if advertised by firmware and software crypto 334 * Enable 11w if advertised by firmware and software crypto
@@ -336,13 +339,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
336 !iwlwifi_mod_params.sw_crypto) 339 !iwlwifi_mod_params.sw_crypto)
337 hw->flags |= IEEE80211_HW_MFP_CAPABLE; 340 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
338 341
339 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
340 !iwlwifi_mod_params.uapsd_disable) {
341 hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
342 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
343 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
344 }
345
346 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN || 342 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN ||
347 mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { 343 mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
348 hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS; 344 hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
@@ -377,6 +373,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
377 373
378 hw->wiphy->max_remain_on_channel_duration = 10000; 374 hw->wiphy->max_remain_on_channel_duration = 10000;
379 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 375 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
376 /* we can compensate an offset of up to 3 channels = 15 MHz */
377 hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
380 378
381 /* Extract MAC address */ 379 /* Extract MAC address */
382 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); 380 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
@@ -403,10 +401,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
403 if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels) 401 if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
404 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 402 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
405 &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; 403 &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
406 if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) 404 if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
407 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 405 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
408 &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; 406 &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
409 407
408 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER)
409 hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
410 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
411 }
412
410 hw->wiphy->hw_version = mvm->trans->hw_id; 413 hw->wiphy->hw_version = mvm->trans->hw_id;
411 414
412 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) 415 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
@@ -459,15 +462,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
459 device_can_wakeup(mvm->trans->dev)) { 462 device_can_wakeup(mvm->trans->dev)) {
460 mvm->wowlan.flags = WIPHY_WOWLAN_ANY; 463 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
461 hw->wiphy->wowlan = &mvm->wowlan; 464 hw->wiphy->wowlan = &mvm->wowlan;
462 } else if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len && 465 }
466
467 if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
463 mvm->trans->ops->d3_suspend && 468 mvm->trans->ops->d3_suspend &&
464 mvm->trans->ops->d3_resume && 469 mvm->trans->ops->d3_resume &&
465 device_can_wakeup(mvm->trans->dev)) { 470 device_can_wakeup(mvm->trans->dev)) {
466 mvm->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | 471 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
467 WIPHY_WOWLAN_DISCONNECT | 472 WIPHY_WOWLAN_DISCONNECT |
468 WIPHY_WOWLAN_EAP_IDENTITY_REQ | 473 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
469 WIPHY_WOWLAN_RFKILL_RELEASE | 474 WIPHY_WOWLAN_RFKILL_RELEASE |
470 WIPHY_WOWLAN_NET_DETECT; 475 WIPHY_WOWLAN_NET_DETECT;
471 if (!iwlwifi_mod_params.sw_crypto) 476 if (!iwlwifi_mod_params.sw_crypto)
472 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | 477 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
473 WIPHY_WOWLAN_GTK_REKEY_FAILURE | 478 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
@@ -707,9 +712,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
707 mvmvif->uploaded = false; 712 mvmvif->uploaded = false;
708 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; 713 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
709 714
710 /* does this make sense at all? */
711 mvmvif->color++;
712
713 spin_lock_bh(&mvm->time_event_lock); 715 spin_lock_bh(&mvm->time_event_lock);
714 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); 716 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
715 spin_unlock_bh(&mvm->time_event_lock); 717 spin_unlock_bh(&mvm->time_event_lock);
@@ -761,41 +763,215 @@ static void iwl_mvm_free_coredump(const void *data)
761 kfree(fw_error_dump); 763 kfree(fw_error_dump);
762} 764}
763 765
766static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
767 struct iwl_fw_error_dump_data **dump_data)
768{
769 struct iwl_fw_error_dump_fifo *fifo_hdr;
770 u32 *fifo_data;
771 u32 fifo_len;
772 unsigned long flags;
773 int i, j;
774
775 if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags))
776 return;
777
778 /* Pull RXF data from all RXFs */
779 for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
780 /*
781 * Keep aside the additional offset that might be needed for
782 * next RXF
783 */
784 u32 offset_diff = RXF_DIFF_FROM_PREV * i;
785
786 fifo_hdr = (void *)(*dump_data)->data;
787 fifo_data = (void *)fifo_hdr->data;
788 fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
789
790 /* No need to try to read the data if the length is 0 */
791 if (fifo_len == 0)
792 continue;
793
794 /* Add a TLV for the RXF */
795 (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
796 (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
797
798 fifo_hdr->fifo_num = cpu_to_le32(i);
799 fifo_hdr->available_bytes =
800 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
801 RXF_RD_D_SPACE +
802 offset_diff));
803 fifo_hdr->wr_ptr =
804 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
805 RXF_RD_WR_PTR +
806 offset_diff));
807 fifo_hdr->rd_ptr =
808 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
809 RXF_RD_RD_PTR +
810 offset_diff));
811 fifo_hdr->fence_ptr =
812 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
813 RXF_RD_FENCE_PTR +
814 offset_diff));
815 fifo_hdr->fence_mode =
816 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
817 RXF_SET_FENCE_MODE +
818 offset_diff));
819
820 /* Lock fence */
821 iwl_trans_write_prph(mvm->trans,
822 RXF_SET_FENCE_MODE + offset_diff, 0x1);
823 /* Set fence pointer to the same place like WR pointer */
824 iwl_trans_write_prph(mvm->trans,
825 RXF_LD_WR2FENCE + offset_diff, 0x1);
826 /* Set fence offset */
827 iwl_trans_write_prph(mvm->trans,
828 RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
829 0x0);
830
831 /* Read FIFO */
832 fifo_len /= sizeof(u32); /* Size in DWORDS */
833 for (j = 0; j < fifo_len; j++)
834 fifo_data[j] = iwl_trans_read_prph(mvm->trans,
835 RXF_FIFO_RD_FENCE_INC +
836 offset_diff);
837 *dump_data = iwl_fw_error_next_data(*dump_data);
838 }
839
840 /* Pull TXF data from all TXFs */
841 for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
842 /* Mark the number of TXF we're pulling now */
843 iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
844
845 fifo_hdr = (void *)(*dump_data)->data;
846 fifo_data = (void *)fifo_hdr->data;
847 fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
848
849 /* No need to try to read the data if the length is 0 */
850 if (fifo_len == 0)
851 continue;
852
853 /* Add a TLV for the FIFO */
854 (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
855 (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
856
857 fifo_hdr->fifo_num = cpu_to_le32(i);
858 fifo_hdr->available_bytes =
859 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
860 TXF_FIFO_ITEM_CNT));
861 fifo_hdr->wr_ptr =
862 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
863 TXF_WR_PTR));
864 fifo_hdr->rd_ptr =
865 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
866 TXF_RD_PTR));
867 fifo_hdr->fence_ptr =
868 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
869 TXF_FENCE_PTR));
870 fifo_hdr->fence_mode =
871 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
872 TXF_LOCK_FENCE));
873
874 /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
875 iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
876 TXF_WR_PTR);
877
878 /* Dummy-read to advance the read pointer to the head */
879 iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
880
881 /* Read FIFO */
882 fifo_len /= sizeof(u32); /* Size in DWORDS */
883 for (j = 0; j < fifo_len; j++)
884 fifo_data[j] = iwl_trans_read_prph(mvm->trans,
885 TXF_READ_MODIFY_DATA);
886 *dump_data = iwl_fw_error_next_data(*dump_data);
887 }
888
889 iwl_trans_release_nic_access(mvm->trans, &flags);
890}
891
764void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) 892void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
765{ 893{
766 struct iwl_fw_error_dump_file *dump_file; 894 struct iwl_fw_error_dump_file *dump_file;
767 struct iwl_fw_error_dump_data *dump_data; 895 struct iwl_fw_error_dump_data *dump_data;
768 struct iwl_fw_error_dump_info *dump_info; 896 struct iwl_fw_error_dump_info *dump_info;
897 struct iwl_fw_error_dump_mem *dump_mem;
769 struct iwl_mvm_dump_ptrs *fw_error_dump; 898 struct iwl_mvm_dump_ptrs *fw_error_dump;
770 const struct fw_img *img;
771 u32 sram_len, sram_ofs; 899 u32 sram_len, sram_ofs;
772 u32 file_len, rxf_len; 900 u32 file_len, fifo_data_len = 0;
773 unsigned long flags; 901 u32 smem_len = mvm->cfg->smem_len;
774 int reg_val; 902 u32 sram2_len = mvm->cfg->dccm2_len;
775 903
776 lockdep_assert_held(&mvm->mutex); 904 lockdep_assert_held(&mvm->mutex);
777 905
906 /* W/A for 8000 HW family A-step */
907 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
908 CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP) {
909 if (smem_len)
910 smem_len = 0x38000;
911
912 if (sram2_len)
913 sram2_len = 0x10000;
914 }
915
778 fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); 916 fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
779 if (!fw_error_dump) 917 if (!fw_error_dump)
780 return; 918 return;
781 919
782 img = &mvm->fw->img[mvm->cur_ucode]; 920 /* SRAM - include stack CCM if driver knows the values for it */
783 sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; 921 if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) {
784 sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; 922 const struct fw_img *img;
923
924 img = &mvm->fw->img[mvm->cur_ucode];
925 sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
926 sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
927 } else {
928 sram_ofs = mvm->cfg->dccm_offset;
929 sram_len = mvm->cfg->dccm_len;
930 }
931
932 /* reading RXF/TXF sizes */
933 if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
934 struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
935 int i;
936
937 fifo_data_len = 0;
938
939 /* Count RXF size */
940 for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
941 if (!mem_cfg->rxfifo_size[i])
942 continue;
943
944 /* Add header info */
945 fifo_data_len += mem_cfg->rxfifo_size[i] +
946 sizeof(*dump_data) +
947 sizeof(struct iwl_fw_error_dump_fifo);
948 }
785 949
786 /* reading buffer size */ 950 for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
787 reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR); 951 if (!mem_cfg->txfifo_size[i])
788 rxf_len = (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS; 952 continue;
789 953
790 /* the register holds the value divided by 128 */ 954 /* Add header info */
791 rxf_len = rxf_len << 7; 955 fifo_data_len += mem_cfg->txfifo_size[i] +
956 sizeof(*dump_data) +
957 sizeof(struct iwl_fw_error_dump_fifo);
958 }
959 }
792 960
793 file_len = sizeof(*dump_file) + 961 file_len = sizeof(*dump_file) +
794 sizeof(*dump_data) * 3 + 962 sizeof(*dump_data) * 2 +
795 sram_len + 963 sram_len + sizeof(*dump_mem) +
796 rxf_len + 964 fifo_data_len +
797 sizeof(*dump_info); 965 sizeof(*dump_info);
798 966
967 /* Make room for the SMEM, if it exists */
968 if (smem_len)
969 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
970
971 /* Make room for the secondary SRAM, if it exists */
972 if (sram2_len)
973 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
974
799 dump_file = vzalloc(file_len); 975 dump_file = vzalloc(file_len);
800 if (!dump_file) { 976 if (!dump_file) {
801 kfree(fw_error_dump); 977 kfree(fw_error_dump);
@@ -814,6 +990,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
814 mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ? 990 mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
815 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) : 991 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
816 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8); 992 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
993 dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev));
817 memcpy(dump_info->fw_human_readable, mvm->fw->human_readable, 994 memcpy(dump_info->fw_human_readable, mvm->fw->human_readable,
818 sizeof(dump_info->fw_human_readable)); 995 sizeof(dump_info->fw_human_readable));
819 strncpy(dump_info->dev_human_readable, mvm->cfg->name, 996 strncpy(dump_info->dev_human_readable, mvm->cfg->name,
@@ -822,28 +999,39 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
822 sizeof(dump_info->bus_human_readable)); 999 sizeof(dump_info->bus_human_readable));
823 1000
824 dump_data = iwl_fw_error_next_data(dump_data); 1001 dump_data = iwl_fw_error_next_data(dump_data);
825 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); 1002 /* We only dump the FIFOs if the FW is in error state */
826 dump_data->len = cpu_to_le32(rxf_len); 1003 if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
827 1004 iwl_mvm_dump_fifos(mvm, &dump_data);
828 if (iwl_trans_grab_nic_access(mvm->trans, false, &flags)) { 1005
829 u32 *rxf = (void *)dump_data->data; 1006 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
830 int i; 1007 dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
1008 dump_mem = (void *)dump_data->data;
1009 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1010 dump_mem->offset = cpu_to_le32(sram_ofs);
1011 iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
1012 sram_len);
831 1013
832 for (i = 0; i < (rxf_len / sizeof(u32)); i++) { 1014 if (smem_len) {
833 iwl_trans_write_prph(mvm->trans, 1015 dump_data = iwl_fw_error_next_data(dump_data);
834 RXF_LD_FENCE_OFFSET_ADDR, 1016 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
835 i * sizeof(u32)); 1017 dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
836 rxf[i] = iwl_trans_read_prph(mvm->trans, 1018 dump_mem = (void *)dump_data->data;
837 RXF_FIFO_RD_FENCE_ADDR); 1019 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
838 } 1020 dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
839 iwl_trans_release_nic_access(mvm->trans, &flags); 1021 iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
1022 dump_mem->data, smem_len);
840 } 1023 }
841 1024
842 dump_data = iwl_fw_error_next_data(dump_data); 1025 if (sram2_len) {
843 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_SRAM); 1026 dump_data = iwl_fw_error_next_data(dump_data);
844 dump_data->len = cpu_to_le32(sram_len); 1027 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
845 iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_data->data, 1028 dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
846 sram_len); 1029 dump_mem = (void *)dump_data->data;
1030 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1031 dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
1032 iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
1033 dump_mem->data, sram2_len);
1034 }
847 1035
848 fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans); 1036 fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
849 fw_error_dump->op_mode_len = file_len; 1037 fw_error_dump->op_mode_len = file_len;
@@ -864,6 +1052,11 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
864 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) 1052 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status))
865 iwl_mvm_fw_error_dump(mvm); 1053 iwl_mvm_fw_error_dump(mvm);
866 1054
1055 /* cleanup all stale references (scan, roc), but keep the
1056 * ucode_down ref until reconfig is complete
1057 */
1058 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1059
867 iwl_trans_stop_device(mvm->trans); 1060 iwl_trans_stop_device(mvm->trans);
868 1061
869 mvm->scan_status = IWL_MVM_SCAN_NONE; 1062 mvm->scan_status = IWL_MVM_SCAN_NONE;
@@ -893,10 +1086,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
893 1086
894 ieee80211_wake_queues(mvm->hw); 1087 ieee80211_wake_queues(mvm->hw);
895 1088
896 /* cleanup all stale references (scan, roc), but keep the
897 * ucode_down ref until reconfig is complete */
898 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
899
900 /* clear any stale d0i3 state */ 1089 /* clear any stale d0i3 state */
901 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); 1090 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
902 1091
@@ -933,6 +1122,19 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
933 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1122 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
934 int ret; 1123 int ret;
935 1124
1125 /* Some hw restart cleanups must not hold the mutex */
1126 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1127 /*
1128 * Make sure we are out of d0i3. This is needed
1129 * to make sure the reference accounting is correct
1130 * (and there is no stale d0i3_exit_work).
1131 */
1132 wait_event_timeout(mvm->d0i3_exit_waitq,
1133 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1134 &mvm->status),
1135 HZ);
1136 }
1137
936 mutex_lock(&mvm->mutex); 1138 mutex_lock(&mvm->mutex);
937 ret = __iwl_mvm_mac_start(mvm); 1139 ret = __iwl_mvm_mac_start(mvm);
938 mutex_unlock(&mvm->mutex); 1140 mutex_unlock(&mvm->mutex);
@@ -982,6 +1184,13 @@ static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
982 IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n"); 1184 IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
983 _iwl_mvm_exit_d0i3(mvm); 1185 _iwl_mvm_exit_d0i3(mvm);
984 } 1186 }
1187
1188 if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
1189 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
1190 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1191 &mvm->status),
1192 HZ))
1193 WARN_ONCE(1, "D0i3 exit on resume timed out\n");
985} 1194}
986 1195
987static void 1196static void
@@ -1146,7 +1355,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1146 1355
1147 ret = iwl_mvm_power_update_mac(mvm); 1356 ret = iwl_mvm_power_update_mac(mvm);
1148 if (ret) 1357 if (ret)
1149 goto out_release; 1358 goto out_remove_mac;
1150 1359
1151 /* beacon filtering */ 1360 /* beacon filtering */
1152 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 1361 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
@@ -2088,7 +2297,7 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2088 struct ieee80211_sta *sta) 2297 struct ieee80211_sta *sta)
2089{ 2298{
2090 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2299 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2091 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 2300 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2092 2301
2093 /* 2302 /*
2094 * This is called before mac80211 does RCU synchronisation, 2303 * This is called before mac80211 does RCU synchronisation,
@@ -2105,6 +2314,20 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2105 mutex_unlock(&mvm->mutex); 2314 mutex_unlock(&mvm->mutex);
2106} 2315}
2107 2316
2317static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2318 const u8 *bssid)
2319{
2320 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2321 return;
2322
2323 if (iwlwifi_mod_params.uapsd_disable) {
2324 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2325 return;
2326 }
2327
2328 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2329}
2330
2108static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, 2331static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2109 struct ieee80211_vif *vif, 2332 struct ieee80211_vif *vif,
2110 struct ieee80211_sta *sta, 2333 struct ieee80211_sta *sta,
@@ -2164,6 +2387,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2164 * Reset EBS status here assuming environment has been changed. 2387 * Reset EBS status here assuming environment has been changed.
2165 */ 2388 */
2166 mvm->last_ebs_successful = true; 2389 mvm->last_ebs_successful = true;
2390 iwl_mvm_check_uapsd(mvm, vif, sta->addr);
2167 ret = 0; 2391 ret = 0;
2168 } else if (old_state == IEEE80211_STA_AUTH && 2392 } else if (old_state == IEEE80211_STA_AUTH &&
2169 new_state == IEEE80211_STA_ASSOC) { 2393 new_state == IEEE80211_STA_ASSOC) {
@@ -3103,7 +3327,7 @@ static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
3103 bool set) 3327 bool set)
3104{ 3328{
3105 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3329 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3106 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 3330 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3107 3331
3108 if (!mvm_sta || !mvm_sta->vif) { 3332 if (!mvm_sta || !mvm_sta->vif) {
3109 IWL_ERR(mvm, "Station is not associated to a vif\n"); 3333 IWL_ERR(mvm, "Station is not associated to a vif\n");
@@ -3343,16 +3567,18 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3343 msk |= mvmsta->tfd_queue_msk; 3567 msk |= mvmsta->tfd_queue_msk;
3344 } 3568 }
3345 3569
3346 msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]); 3570 if (drop) {
3347 3571 if (iwl_mvm_flush_tx_path(mvm, msk, true))
3348 if (iwl_mvm_flush_tx_path(mvm, msk, true)) 3572 IWL_ERR(mvm, "flush request fail\n");
3349 IWL_ERR(mvm, "flush request fail\n"); 3573 mutex_unlock(&mvm->mutex);
3350 mutex_unlock(&mvm->mutex); 3574 } else {
3575 mutex_unlock(&mvm->mutex);
3351 3576
3352 /* this can take a while, and we may need/want other operations 3577 /* this can take a while, and we may need/want other operations
3353 * to succeed while doing this, so do it without the mutex held 3578 * to succeed while doing this, so do it without the mutex held
3354 */ 3579 */
3355 iwl_trans_wait_tx_queue_empty(mvm->trans, msk); 3580 iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
3581 }
3356} 3582}
3357 3583
3358const struct ieee80211_ops iwl_mvm_hw_ops = { 3584const struct ieee80211_ops iwl_mvm_hw_ops = {
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d24660fb4ef2..6c69d0584f6c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -119,11 +119,13 @@ extern const struct ieee80211_ops iwl_mvm_hw_ops;
119 * We will register to mac80211 to have testmode working. The NIC must not 119 * We will register to mac80211 to have testmode working. The NIC must not
120 * be up'ed after the INIT fw asserted. This is useful to be able to use 120 * be up'ed after the INIT fw asserted. This is useful to be able to use
121 * proprietary tools over testmode to debug the INIT fw. 121 * proprietary tools over testmode to debug the INIT fw.
122 * @tfd_q_hang_detect: enabled the detection of hung transmit queues
122 * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power 123 * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power
123 * Save)-2(default), LP(Low Power)-3 124 * Save)-2(default), LP(Low Power)-3
124 */ 125 */
125struct iwl_mvm_mod_params { 126struct iwl_mvm_mod_params {
126 bool init_dbg; 127 bool init_dbg;
128 bool tfd_q_hang_detect;
127 int power_scheme; 129 int power_scheme;
128}; 130};
129extern struct iwl_mvm_mod_params iwlmvm_mod_params; 131extern struct iwl_mvm_mod_params iwlmvm_mod_params;
@@ -276,6 +278,7 @@ enum iwl_mvm_ref_type {
276 IWL_MVM_REF_TM_CMD, 278 IWL_MVM_REF_TM_CMD,
277 IWL_MVM_REF_EXIT_WORK, 279 IWL_MVM_REF_EXIT_WORK,
278 IWL_MVM_REF_PROTECT_CSA, 280 IWL_MVM_REF_PROTECT_CSA,
281 IWL_MVM_REF_FW_DBG_COLLECT,
279 282
280 /* update debugfs.c when changing this */ 283 /* update debugfs.c when changing this */
281 284
@@ -531,10 +534,23 @@ enum {
531enum iwl_mvm_tdls_cs_state { 534enum iwl_mvm_tdls_cs_state {
532 IWL_MVM_TDLS_SW_IDLE = 0, 535 IWL_MVM_TDLS_SW_IDLE = 0,
533 IWL_MVM_TDLS_SW_REQ_SENT, 536 IWL_MVM_TDLS_SW_REQ_SENT,
537 IWL_MVM_TDLS_SW_RESP_RCVD,
534 IWL_MVM_TDLS_SW_REQ_RCVD, 538 IWL_MVM_TDLS_SW_REQ_RCVD,
535 IWL_MVM_TDLS_SW_ACTIVE, 539 IWL_MVM_TDLS_SW_ACTIVE,
536}; 540};
537 541
542struct iwl_mvm_shared_mem_cfg {
543 u32 shared_mem_addr;
544 u32 shared_mem_size;
545 u32 sample_buff_addr;
546 u32 sample_buff_size;
547 u32 txfifo_addr;
548 u32 txfifo_size[TX_FIFO_MAX_NUM];
549 u32 rxfifo_size[RX_FIFO_MAX_NUM];
550 u32 page_buff_addr;
551 u32 page_buff_size;
552};
553
538struct iwl_mvm { 554struct iwl_mvm {
539 /* for logger access */ 555 /* for logger access */
540 struct device *dev; 556 struct device *dev;
@@ -641,6 +657,8 @@ struct iwl_mvm {
641 bool disable_power_off; 657 bool disable_power_off;
642 bool disable_power_off_d3; 658 bool disable_power_off_d3;
643 659
660 bool scan_iter_notif_enabled;
661
644 struct debugfs_blob_wrapper nvm_hw_blob; 662 struct debugfs_blob_wrapper nvm_hw_blob;
645 struct debugfs_blob_wrapper nvm_sw_blob; 663 struct debugfs_blob_wrapper nvm_sw_blob;
646 struct debugfs_blob_wrapper nvm_calib_blob; 664 struct debugfs_blob_wrapper nvm_calib_blob;
@@ -782,8 +800,13 @@ struct iwl_mvm {
782 struct cfg80211_chan_def chandef; 800 struct cfg80211_chan_def chandef;
783 struct sk_buff *skb; /* ch sw template */ 801 struct sk_buff *skb; /* ch sw template */
784 u32 ch_sw_tm_ie; 802 u32 ch_sw_tm_ie;
803
804 /* timestamp of last ch-sw request sent (GP2 time) */
805 u32 sent_timestamp;
785 } peer; 806 } peer;
786 } tdls_cs; 807 } tdls_cs;
808
809 struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
787}; 810};
788 811
789/* Extract MVM priv from op_mode and _hw */ 812/* Extract MVM priv from op_mode and _hw */
@@ -850,12 +873,14 @@ iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
850static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) 873static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
851{ 874{
852 return mvm->trans->cfg->d0i3 && 875 return mvm->trans->cfg->d0i3 &&
876 mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF &&
877 !iwlwifi_mod_params.d0i3_disable &&
853 (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); 878 (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
854} 879}
855 880
856static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm) 881static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
857{ 882{
858 return mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_DQA_SUPPORT; 883 return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
859} 884}
860 885
861extern const u8 iwl_mvm_ac_to_tx_fifo[]; 886extern const u8 iwl_mvm_ac_to_tx_fifo[];
@@ -937,6 +962,33 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
937int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic); 962int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
938int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); 963int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
939 964
965static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
966{
967 return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ?
968 mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant :
969 mvm->fw->valid_tx_ant;
970}
971
972static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
973{
974 return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ?
975 mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant :
976 mvm->fw->valid_rx_ant;
977}
978
979static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm)
980{
981 u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN |
982 FW_PHY_CFG_RX_CHAIN);
983 u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm);
984 u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
985
986 phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS |
987 valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS;
988
989 return mvm->fw->phy_config & phy_config;
990}
991
940int iwl_mvm_up(struct iwl_mvm *mvm); 992int iwl_mvm_up(struct iwl_mvm *mvm);
941int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); 993int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
942 994
@@ -970,6 +1022,9 @@ int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
970 struct iwl_device_cmd *cmd); 1022 struct iwl_device_cmd *cmd);
971int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, 1023int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
972 struct iwl_device_cmd *cmd); 1024 struct iwl_device_cmd *cmd);
1025int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
1026 struct iwl_rx_cmd_buffer *rxb,
1027 struct iwl_device_cmd *cmd);
973 1028
974/* MVM PHY */ 1029/* MVM PHY */
975int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, 1030int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
@@ -1031,6 +1086,9 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan);
1031int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, 1086int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
1032 struct iwl_rx_cmd_buffer *rxb, 1087 struct iwl_rx_cmd_buffer *rxb,
1033 struct iwl_device_cmd *cmd); 1088 struct iwl_device_cmd *cmd);
1089int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
1090 struct iwl_rx_cmd_buffer *rxb,
1091 struct iwl_device_cmd *cmd);
1034int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm, 1092int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
1035 struct ieee80211_vif *vif, 1093 struct ieee80211_vif *vif,
1036 struct cfg80211_sched_scan_request *req, 1094 struct cfg80211_sched_scan_request *req,
@@ -1091,9 +1149,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1091 1149
1092/* rate scaling */ 1150/* rate scaling */
1093int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init); 1151int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
1094void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, 1152void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
1095 struct iwl_mvm_frame_stats *stats,
1096 u32 rate, bool agg);
1097int rs_pretty_print_rate(char *buf, const u32 rate); 1153int rs_pretty_print_rate(char *buf, const u32 rate);
1098void rs_update_last_rssi(struct iwl_mvm *mvm, 1154void rs_update_last_rssi(struct iwl_mvm *mvm,
1099 struct iwl_lq_sta *lq_sta, 1155 struct iwl_lq_sta *lq_sta,
@@ -1159,6 +1215,8 @@ void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
1159int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); 1215int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
1160bool iwl_mvm_ref_taken(struct iwl_mvm *mvm); 1216bool iwl_mvm_ref_taken(struct iwl_mvm *mvm);
1161void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq); 1217void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
1218int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode);
1219int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode);
1162int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm); 1220int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
1163 1221
1164/* BT Coex */ 1222/* BT Coex */
@@ -1260,11 +1318,13 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
1260 1318
1261/* hw scheduler queue config */ 1319/* hw scheduler queue config */
1262void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn, 1320void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
1263 const struct iwl_trans_txq_scd_cfg *cfg); 1321 const struct iwl_trans_txq_scd_cfg *cfg,
1264void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue); 1322 unsigned int wdg_timeout);
1323void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags);
1265 1324
1266static inline void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, 1325static inline
1267 u8 fifo) 1326void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
1327 u8 fifo, unsigned int wdg_timeout)
1268{ 1328{
1269 struct iwl_trans_txq_scd_cfg cfg = { 1329 struct iwl_trans_txq_scd_cfg cfg = {
1270 .fifo = fifo, 1330 .fifo = fifo,
@@ -1273,12 +1333,13 @@ static inline void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
1273 .frame_limit = IWL_FRAME_LIMIT, 1333 .frame_limit = IWL_FRAME_LIMIT,
1274 }; 1334 };
1275 1335
1276 iwl_mvm_enable_txq(mvm, queue, 0, &cfg); 1336 iwl_mvm_enable_txq(mvm, queue, 0, &cfg, wdg_timeout);
1277} 1337}
1278 1338
1279static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue, 1339static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
1280 int fifo, int sta_id, int tid, 1340 int fifo, int sta_id, int tid,
1281 int frame_limit, u16 ssn) 1341 int frame_limit, u16 ssn,
1342 unsigned int wdg_timeout)
1282{ 1343{
1283 struct iwl_trans_txq_scd_cfg cfg = { 1344 struct iwl_trans_txq_scd_cfg cfg = {
1284 .fifo = fifo, 1345 .fifo = fifo,
@@ -1288,7 +1349,7 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
1288 .aggregate = true, 1349 .aggregate = true,
1289 }; 1350 };
1290 1351
1291 iwl_mvm_enable_txq(mvm, queue, ssn, &cfg); 1352 iwl_mvm_enable_txq(mvm, queue, ssn, &cfg, wdg_timeout);
1292} 1353}
1293 1354
1294/* Assoc status */ 1355/* Assoc status */
@@ -1344,4 +1405,7 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
1344void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); 1405void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
1345void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); 1406void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
1346 1407
1408int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, enum iwl_fw_dbg_conf id);
1409void iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm);
1410
1347#endif /* __IWL_MVM_H__ */ 1411#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index d55fd8e3654c..5383429d96c1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -356,7 +356,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
356 max_section_size = IWL_MAX_NVM_SECTION_SIZE; 356 max_section_size = IWL_MAX_NVM_SECTION_SIZE;
357 else if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP) 357 else if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP)
358 max_section_size = IWL_MAX_NVM_8000A_SECTION_SIZE; 358 max_section_size = IWL_MAX_NVM_8000A_SECTION_SIZE;
359 else /* Family 8000 B-step */ 359 else /* Family 8000 B-step or C-step */
360 max_section_size = IWL_MAX_NVM_8000B_SECTION_SIZE; 360 max_section_size = IWL_MAX_NVM_8000B_SECTION_SIZE;
361 361
362 /* 362 /*
@@ -565,6 +565,8 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
565 mvm->nvm_data = iwl_parse_nvm_sections(mvm); 565 mvm->nvm_data = iwl_parse_nvm_sections(mvm);
566 if (!mvm->nvm_data) 566 if (!mvm->nvm_data)
567 return -ENODATA; 567 return -ENODATA;
568 IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
569 mvm->nvm_data->nvm_version);
568 570
569 return 0; 571 return 0;
570} 572}
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 97dfba50c682..2dffc3600ed3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -84,15 +84,8 @@
84#include "time-event.h" 84#include "time-event.h"
85#include "iwl-fw-error-dump.h" 85#include "iwl-fw-error-dump.h"
86 86
87/*
88 * module name, copyright, version, etc.
89 */
90#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" 87#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
91
92#define DRV_VERSION IWLWIFI_VERSION
93
94MODULE_DESCRIPTION(DRV_DESCRIPTION); 88MODULE_DESCRIPTION(DRV_DESCRIPTION);
95MODULE_VERSION(DRV_VERSION);
96MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 89MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
97MODULE_LICENSE("GPL"); 90MODULE_LICENSE("GPL");
98 91
@@ -100,6 +93,7 @@ static const struct iwl_op_mode_ops iwl_mvm_ops;
100 93
101struct iwl_mvm_mod_params iwlmvm_mod_params = { 94struct iwl_mvm_mod_params iwlmvm_mod_params = {
102 .power_scheme = IWL_POWER_SCHEME_BPS, 95 .power_scheme = IWL_POWER_SCHEME_BPS,
96 .tfd_q_hang_detect = true
103 /* rest of fields are 0 by default */ 97 /* rest of fields are 0 by default */
104}; 98};
105 99
@@ -109,6 +103,10 @@ MODULE_PARM_DESC(init_dbg,
109module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO); 103module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
110MODULE_PARM_DESC(power_scheme, 104MODULE_PARM_DESC(power_scheme,
111 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2"); 105 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
106module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
107 bool, S_IRUGO);
108MODULE_PARM_DESC(tfd_q_hang_detect,
109 "TFD queues hang detection (default: true");
112 110
113/* 111/*
114 * module init and exit functions 112 * module init and exit functions
@@ -146,13 +144,14 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
146 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 144 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
147 u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash; 145 u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
148 u32 reg_val = 0; 146 u32 reg_val = 0;
147 u32 phy_config = iwl_mvm_get_phy_config(mvm);
149 148
150 radio_cfg_type = (mvm->fw->phy_config & FW_PHY_CFG_RADIO_TYPE) >> 149 radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
151 FW_PHY_CFG_RADIO_TYPE_POS; 150 FW_PHY_CFG_RADIO_TYPE_POS;
152 radio_cfg_step = (mvm->fw->phy_config & FW_PHY_CFG_RADIO_STEP) >> 151 radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
153 FW_PHY_CFG_RADIO_STEP_POS; 152 FW_PHY_CFG_RADIO_STEP_POS;
154 radio_cfg_dash = (mvm->fw->phy_config & FW_PHY_CFG_RADIO_DASH) >> 153 radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
155 FW_PHY_CFG_RADIO_DASH_POS; 154 FW_PHY_CFG_RADIO_DASH_POS;
156 155
157 /* SKU control */ 156 /* SKU control */
158 reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) << 157 reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
@@ -240,6 +239,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
240 239
241 RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false), 240 RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
242 RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true), 241 RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true),
242 RX_HANDLER(SCAN_ITERATION_COMPLETE,
243 iwl_mvm_rx_scan_offload_iter_complete_notif, false),
243 RX_HANDLER(SCAN_OFFLOAD_COMPLETE, 244 RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
244 iwl_mvm_rx_scan_offload_complete_notif, true), 245 iwl_mvm_rx_scan_offload_complete_notif, true),
245 RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results, 246 RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results,
@@ -274,6 +275,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
274 CMD(MGMT_MCAST_KEY), 275 CMD(MGMT_MCAST_KEY),
275 CMD(TX_CMD), 276 CMD(TX_CMD),
276 CMD(TXPATH_FLUSH), 277 CMD(TXPATH_FLUSH),
278 CMD(SHARED_MEM_CFG),
277 CMD(MAC_CONTEXT_CMD), 279 CMD(MAC_CONTEXT_CMD),
278 CMD(TIME_EVENT_CMD), 280 CMD(TIME_EVENT_CMD),
279 CMD(TIME_EVENT_NOTIFICATION), 281 CMD(TIME_EVENT_NOTIFICATION),
@@ -476,17 +478,19 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
476 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE) 478 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
477 trans_cfg.bc_table_dword = true; 479 trans_cfg.bc_table_dword = true;
478 480
479 if (!iwlwifi_mod_params.wd_disable)
480 trans_cfg.queue_watchdog_timeout = cfg->base_params->wd_timeout;
481 else
482 trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
483
484 trans_cfg.command_names = iwl_mvm_cmd_strings; 481 trans_cfg.command_names = iwl_mvm_cmd_strings;
485 482
486 trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE; 483 trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
487 trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; 484 trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
488 trans_cfg.scd_set_active = true; 485 trans_cfg.scd_set_active = true;
489 486
487 trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
488
489 /* Set a short watchdog for the command queue */
490 trans_cfg.cmd_q_wdg_timeout =
491 iwlmvm_mod_params.tfd_q_hang_detect ? IWL_DEF_WD_TIMEOUT :
492 IWL_WATCHDOG_DISABLED;
493
490 snprintf(mvm->hw->wiphy->fw_version, 494 snprintf(mvm->hw->wiphy->fw_version,
491 sizeof(mvm->hw->wiphy->fw_version), 495 sizeof(mvm->hw->wiphy->fw_version),
492 "%s", fw->fw_version); 496 "%s", fw->fw_version);
@@ -517,10 +521,15 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
517 min_backoff = calc_min_backoff(trans, cfg); 521 min_backoff = calc_min_backoff(trans, cfg);
518 iwl_mvm_tt_initialize(mvm, min_backoff); 522 iwl_mvm_tt_initialize(mvm, min_backoff);
519 /* set the nvm_file_name according to priority */ 523 /* set the nvm_file_name according to priority */
520 if (iwlwifi_mod_params.nvm_file) 524 if (iwlwifi_mod_params.nvm_file) {
521 mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; 525 mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
522 else 526 } else {
523 mvm->nvm_file_name = mvm->cfg->default_nvm_file; 527 if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) &&
528 (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP))
529 mvm->nvm_file_name = mvm->cfg->default_nvm_file_8000A;
530 else
531 mvm->nvm_file_name = mvm->cfg->default_nvm_file;
532 }
524 533
525 if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name, 534 if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
526 "not allowing power-up and not having nvm_file\n")) 535 "not allowing power-up and not having nvm_file\n"))
@@ -559,6 +568,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
559 if (!mvm->scan_cmd) 568 if (!mvm->scan_cmd)
560 goto out_free; 569 goto out_free;
561 570
571 /* Set EBS as successful as long as not stated otherwise by the FW. */
572 mvm->last_ebs_successful = true;
573
562 err = iwl_mvm_mac_setup_register(mvm); 574 err = iwl_mvm_mac_setup_register(mvm);
563 if (err) 575 if (err)
564 goto out_free; 576 goto out_free;
@@ -817,9 +829,20 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
817 struct iwl_mvm *mvm = 829 struct iwl_mvm *mvm =
818 container_of(work, struct iwl_mvm, fw_error_dump_wk); 830 container_of(work, struct iwl_mvm, fw_error_dump_wk);
819 831
832 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT))
833 return;
834
820 mutex_lock(&mvm->mutex); 835 mutex_lock(&mvm->mutex);
821 iwl_mvm_fw_error_dump(mvm); 836 iwl_mvm_fw_error_dump(mvm);
837
838 /* start recording again if the firmware is not crashed */
839 WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
840 mvm->fw->dbg_dest_tlv &&
841 iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
842
822 mutex_unlock(&mvm->mutex); 843 mutex_unlock(&mvm->mutex);
844
845 iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
823} 846}
824 847
825void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) 848void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
@@ -855,7 +878,10 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
855 * If WoWLAN fw asserted, don't restart either, mac80211 878 * If WoWLAN fw asserted, don't restart either, mac80211
856 * can't recover this since we're already half suspended. 879 * can't recover this since we're already half suspended.
857 */ 880 */
858 if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 881 if (!mvm->restart_fw && fw_error) {
882 schedule_work(&mvm->fw_error_dump_wk);
883 } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
884 &mvm->status)) {
859 struct iwl_mvm_reprobe *reprobe; 885 struct iwl_mvm_reprobe *reprobe;
860 886
861 IWL_ERR(mvm, 887 IWL_ERR(mvm,
@@ -879,16 +905,13 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
879 reprobe->dev = mvm->trans->dev; 905 reprobe->dev = mvm->trans->dev;
880 INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); 906 INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
881 schedule_work(&reprobe->work); 907 schedule_work(&reprobe->work);
882 } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && 908 } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
883 (!fw_error || mvm->restart_fw)) {
884 /* don't let the transport/FW power down */ 909 /* don't let the transport/FW power down */
885 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); 910 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
886 911
887 if (fw_error && mvm->restart_fw > 0) 912 if (fw_error && mvm->restart_fw > 0)
888 mvm->restart_fw--; 913 mvm->restart_fw--;
889 ieee80211_restart_hw(mvm->hw); 914 ieee80211_restart_hw(mvm->hw);
890 } else if (fw_error) {
891 schedule_work(&mvm->fw_error_dump_wk);
892 } 915 }
893} 916}
894 917
@@ -1031,7 +1054,8 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
1031out: 1054out:
1032 rcu_read_unlock(); 1055 rcu_read_unlock();
1033} 1056}
1034static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) 1057
1058int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
1035{ 1059{
1036 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 1060 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1037 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE; 1061 u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
@@ -1047,6 +1071,7 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
1047 }; 1071 };
1048 struct iwl_d3_manager_config d3_cfg_cmd = { 1072 struct iwl_d3_manager_config d3_cfg_cmd = {
1049 .min_sleep_time = cpu_to_le32(1000), 1073 .min_sleep_time = cpu_to_le32(1000),
1074 .wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
1050 }; 1075 };
1051 1076
1052 IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n"); 1077 IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
@@ -1146,7 +1171,7 @@ void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
1146 1171
1147 if (mvm->d0i3_offloading && qos_seq) { 1172 if (mvm->d0i3_offloading && qos_seq) {
1148 /* update qos seq numbers if offloading was enabled */ 1173 /* update qos seq numbers if offloading was enabled */
1149 mvm_ap_sta = (struct iwl_mvm_sta *)sta->drv_priv; 1174 mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
1150 for (i = 0; i < IWL_MAX_TID_COUNT; i++) { 1175 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
1151 u16 seq = le16_to_cpu(qos_seq[i]); 1176 u16 seq = le16_to_cpu(qos_seq[i]);
1152 /* firmware stores last-used one, we store next one */ 1177 /* firmware stores last-used one, we store next one */
@@ -1245,7 +1270,7 @@ out:
1245 return ret; 1270 return ret;
1246} 1271}
1247 1272
1248static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode) 1273int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
1249{ 1274{
1250 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 1275 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1251 1276
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index 1c0d4a45c1a8..5b43616eeb06 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -170,13 +170,13 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
170 active_cnt = 2; 170 active_cnt = 2;
171 } 171 }
172 172
173 cmd->rxchain_info = cpu_to_le32(mvm->fw->valid_rx_ant << 173 cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
174 PHY_RX_CHAIN_VALID_POS); 174 PHY_RX_CHAIN_VALID_POS);
175 cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); 175 cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
176 cmd->rxchain_info |= cpu_to_le32(active_cnt << 176 cmd->rxchain_info |= cpu_to_le32(active_cnt <<
177 PHY_RX_CHAIN_MIMO_CNT_POS); 177 PHY_RX_CHAIN_MIMO_CNT_POS);
178 178
179 cmd->txchain_info = cpu_to_le32(mvm->fw->valid_tx_ant); 179 cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
180} 180}
181 181
182/* 182/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 30ceb67ed7a7..194bd1f939ca 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -39,28 +39,16 @@
39#include "sta.h" 39#include "sta.h"
40#include "iwl-op-mode.h" 40#include "iwl-op-mode.h"
41#include "mvm.h" 41#include "mvm.h"
42#include "debugfs.h"
42 43
43#define RS_NAME "iwl-mvm-rs" 44#define RS_NAME "iwl-mvm-rs"
44 45
45#define NUM_TRY_BEFORE_ANT_TOGGLE 1
46#define RS_LEGACY_RETRIES_PER_RATE 1
47#define RS_HT_VHT_RETRIES_PER_RATE 2
48#define RS_HT_VHT_RETRIES_PER_RATE_TW 1
49#define RS_INITIAL_MIMO_NUM_RATES 3
50#define RS_INITIAL_SISO_NUM_RATES 3
51#define RS_INITIAL_LEGACY_NUM_RATES LINK_QUAL_MAX_RETRY_NUM
52#define RS_SECONDARY_LEGACY_NUM_RATES LINK_QUAL_MAX_RETRY_NUM
53#define RS_SECONDARY_SISO_NUM_RATES 3
54#define RS_SECONDARY_SISO_RETRIES 1
55
56#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */ 46#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
57#define IWL_RATE_MIN_FAILURE_TH 3 /* min failures to calc tpt */
58#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
59 47
60/* max allowed rate miss before sync LQ cmd */ 48/* Calculations of success ratio are done in fixed point where 12800 is 100%.
61#define IWL_MISSED_RATE_MAX 15 49 * Use this macro when dealing with thresholds consts set as a percentage
62#define RS_STAY_IN_COLUMN_TIMEOUT (5*HZ) 50 */
63#define RS_IDLE_TIMEOUT (5*HZ) 51#define RS_PERCENT(x) (128 * x)
64 52
65static u8 rs_ht_to_legacy[] = { 53static u8 rs_ht_to_legacy[] = {
66 [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX, 54 [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
@@ -173,7 +161,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
173 if (sta->smps_mode == IEEE80211_SMPS_STATIC) 161 if (sta->smps_mode == IEEE80211_SMPS_STATIC)
174 return false; 162 return false;
175 163
176 if (num_of_ant(mvm->fw->valid_tx_ant) < 2) 164 if (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) < 2)
177 return false; 165 return false;
178 166
179 if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) 167 if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
@@ -613,7 +601,8 @@ static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
613 * at this rate. window->data contains the bitmask of successful 601 * at this rate. window->data contains the bitmask of successful
614 * packets. 602 * packets.
615 */ 603 */
616static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, 604static int _rs_collect_tx_data(struct iwl_mvm *mvm,
605 struct iwl_scale_tbl_info *tbl,
617 int scale_index, int attempts, int successes, 606 int scale_index, int attempts, int successes,
618 struct iwl_rate_scale_data *window) 607 struct iwl_rate_scale_data *window)
619{ 608{
@@ -668,8 +657,8 @@ static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
668 fail_count = window->counter - window->success_counter; 657 fail_count = window->counter - window->success_counter;
669 658
670 /* Calculate average throughput, if we have enough history. */ 659 /* Calculate average throughput, if we have enough history. */
671 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || 660 if ((fail_count >= IWL_MVM_RS_RATE_MIN_FAILURE_TH) ||
672 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) 661 (window->success_counter >= IWL_MVM_RS_RATE_MIN_SUCCESS_TH))
673 window->average_tpt = (window->success_ratio * tpt + 64) / 128; 662 window->average_tpt = (window->success_ratio * tpt + 64) / 128;
674 else 663 else
675 window->average_tpt = IWL_INVALID_VALUE; 664 window->average_tpt = IWL_INVALID_VALUE;
@@ -677,7 +666,8 @@ static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
677 return 0; 666 return 0;
678} 667}
679 668
680static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta, 669static int rs_collect_tx_data(struct iwl_mvm *mvm,
670 struct iwl_lq_sta *lq_sta,
681 struct iwl_scale_tbl_info *tbl, 671 struct iwl_scale_tbl_info *tbl,
682 int scale_index, int attempts, int successes, 672 int scale_index, int attempts, int successes,
683 u8 reduced_txp) 673 u8 reduced_txp)
@@ -698,7 +688,7 @@ static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
698 /* Select window for current tx bit rate */ 688 /* Select window for current tx bit rate */
699 window = &(tbl->win[scale_index]); 689 window = &(tbl->win[scale_index]);
700 690
701 ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes, 691 ret = _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
702 window); 692 window);
703 if (ret) 693 if (ret)
704 return ret; 694 return ret;
@@ -707,7 +697,7 @@ static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
707 return -EINVAL; 697 return -EINVAL;
708 698
709 window = &tbl->tpc_win[reduced_txp]; 699 window = &tbl->tpc_win[reduced_txp];
710 return _rs_collect_tx_data(tbl, scale_index, attempts, successes, 700 return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
711 window); 701 window);
712} 702}
713 703
@@ -928,7 +918,6 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
928 break; 918 break;
929 if (rate_mask & (1 << low)) 919 if (rate_mask & (1 << low))
930 break; 920 break;
931 IWL_DEBUG_RATE(mvm, "Skipping masked lower rate: %d\n", low);
932 } 921 }
933 922
934 high = index; 923 high = index;
@@ -938,7 +927,6 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
938 break; 927 break;
939 if (rate_mask & (1 << high)) 928 if (rate_mask & (1 << high))
940 break; 929 break;
941 IWL_DEBUG_RATE(mvm, "Skipping masked higher rate: %d\n", high);
942 } 930 }
943 931
944 return (high << 8) | low; 932 return (high << 8) | low;
@@ -1004,7 +992,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
1004 } 992 }
1005 993
1006 if (num_of_ant(rate->ant) > 1) 994 if (num_of_ant(rate->ant) > 1)
1007 rate->ant = first_antenna(mvm->fw->valid_tx_ant); 995 rate->ant = first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
1008 996
1009 /* Relevant in both switching to SISO or Legacy */ 997 /* Relevant in both switching to SISO or Legacy */
1010 rate->sgi = false; 998 rate->sgi = false;
@@ -1125,7 +1113,8 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1125 } 1113 }
1126 1114
1127 if (time_after(jiffies, 1115 if (time_after(jiffies,
1128 (unsigned long)(lq_sta->last_tx + RS_IDLE_TIMEOUT))) { 1116 (unsigned long)(lq_sta->last_tx +
1117 (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
1129 int t; 1118 int t;
1130 1119
1131 IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n"); 1120 IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
@@ -1158,7 +1147,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1158 * ... driver. 1147 * ... driver.
1159 */ 1148 */
1160 lq_sta->missed_rate_counter++; 1149 lq_sta->missed_rate_counter++;
1161 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { 1150 if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
1162 lq_sta->missed_rate_counter = 0; 1151 lq_sta->missed_rate_counter = 0;
1163 IWL_DEBUG_RATE(mvm, 1152 IWL_DEBUG_RATE(mvm,
1164 "Too many rates mismatch. Send sync LQ. rs_state %d\n", 1153 "Too many rates mismatch. Send sync LQ. rs_state %d\n",
@@ -1213,7 +1202,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1213 1202
1214 ucode_rate = le32_to_cpu(table->rs_table[0]); 1203 ucode_rate = le32_to_cpu(table->rs_table[0]);
1215 rs_rate_from_ucode_rate(ucode_rate, info->band, &rate); 1204 rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
1216 rs_collect_tx_data(lq_sta, curr_tbl, rate.index, 1205 rs_collect_tx_data(mvm, lq_sta, curr_tbl, rate.index,
1217 info->status.ampdu_len, 1206 info->status.ampdu_len,
1218 info->status.ampdu_ack_len, 1207 info->status.ampdu_ack_len,
1219 reduced_txp); 1208 reduced_txp);
@@ -1249,7 +1238,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1249 else 1238 else
1250 continue; 1239 continue;
1251 1240
1252 rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1, 1241 rs_collect_tx_data(mvm, lq_sta, tmp_tbl, rate.index, 1,
1253 i < retries ? 0 : legacy_success, 1242 i < retries ? 0 : legacy_success,
1254 reduced_txp); 1243 reduced_txp);
1255 } 1244 }
@@ -1303,13 +1292,13 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
1303 IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n"); 1292 IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n");
1304 lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN; 1293 lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN;
1305 if (is_legacy) { 1294 if (is_legacy) {
1306 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT; 1295 lq_sta->table_count_limit = IWL_MVM_RS_LEGACY_TABLE_COUNT;
1307 lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT; 1296 lq_sta->max_failure_limit = IWL_MVM_RS_LEGACY_FAILURE_LIMIT;
1308 lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT; 1297 lq_sta->max_success_limit = IWL_MVM_RS_LEGACY_SUCCESS_LIMIT;
1309 } else { 1298 } else {
1310 lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT; 1299 lq_sta->table_count_limit = IWL_MVM_RS_NON_LEGACY_TABLE_COUNT;
1311 lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT; 1300 lq_sta->max_failure_limit = IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT;
1312 lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT; 1301 lq_sta->max_success_limit = IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT;
1313 } 1302 }
1314 lq_sta->table_count = 0; 1303 lq_sta->table_count = 0;
1315 lq_sta->total_failed = 0; 1304 lq_sta->total_failed = 0;
@@ -1318,6 +1307,13 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
1318 lq_sta->visited_columns = 0; 1307 lq_sta->visited_columns = 0;
1319} 1308}
1320 1309
1310static inline int rs_get_max_rate_from_mask(unsigned long rate_mask)
1311{
1312 if (rate_mask)
1313 return find_last_bit(&rate_mask, BITS_PER_LONG);
1314 return IWL_RATE_INVALID;
1315}
1316
1321static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta, 1317static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta,
1322 const struct rs_tx_column *column) 1318 const struct rs_tx_column *column)
1323{ 1319{
@@ -1420,7 +1416,7 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
1420 u32 target_tpt; 1416 u32 target_tpt;
1421 int rate_idx; 1417 int rate_idx;
1422 1418
1423 if (success_ratio > RS_SR_NO_DECREASE) { 1419 if (success_ratio > IWL_MVM_RS_SR_NO_DECREASE) {
1424 target_tpt = 100 * expected_current_tpt; 1420 target_tpt = 100 * expected_current_tpt;
1425 IWL_DEBUG_RATE(mvm, 1421 IWL_DEBUG_RATE(mvm,
1426 "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n", 1422 "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
@@ -1488,7 +1484,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1488 flush_interval_passed = 1484 flush_interval_passed =
1489 time_after(jiffies, 1485 time_after(jiffies,
1490 (unsigned long)(lq_sta->flush_timer + 1486 (unsigned long)(lq_sta->flush_timer +
1491 RS_STAY_IN_COLUMN_TIMEOUT)); 1487 (IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT * HZ)));
1492 1488
1493 /* 1489 /*
1494 * Check if we should allow search for new modulation mode. 1490 * Check if we should allow search for new modulation mode.
@@ -1567,7 +1563,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1567 const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column]; 1563 const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
1568 const struct rs_tx_column *next_col; 1564 const struct rs_tx_column *next_col;
1569 allow_column_func_t allow_func; 1565 allow_column_func_t allow_func;
1570 u8 valid_ants = mvm->fw->valid_tx_ant; 1566 u8 valid_ants = iwl_mvm_get_valid_tx_ant(mvm);
1571 const u16 *expected_tpt_tbl; 1567 const u16 *expected_tpt_tbl;
1572 u16 tpt, max_expected_tpt; 1568 u16 tpt, max_expected_tpt;
1573 1569
@@ -1613,8 +1609,12 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1613 continue; 1609 continue;
1614 1610
1615 max_rate = rs_get_max_allowed_rate(lq_sta, next_col); 1611 max_rate = rs_get_max_allowed_rate(lq_sta, next_col);
1616 if (WARN_ON_ONCE(max_rate == IWL_RATE_INVALID)) 1612 if (max_rate == IWL_RATE_INVALID) {
1613 IWL_DEBUG_RATE(mvm,
1614 "Skip column %d: no rate is allowed in this column\n",
1615 next_col_id);
1617 continue; 1616 continue;
1617 }
1618 1618
1619 max_expected_tpt = expected_tpt_tbl[max_rate]; 1619 max_expected_tpt = expected_tpt_tbl[max_rate];
1620 if (tpt >= max_expected_tpt) { 1620 if (tpt >= max_expected_tpt) {
@@ -1724,7 +1724,8 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
1724{ 1724{
1725 enum rs_action action = RS_ACTION_STAY; 1725 enum rs_action action = RS_ACTION_STAY;
1726 1726
1727 if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) { 1727 if ((sr <= RS_PERCENT(IWL_MVM_RS_SR_FORCE_DECREASE)) ||
1728 (current_tpt == 0)) {
1728 IWL_DEBUG_RATE(mvm, 1729 IWL_DEBUG_RATE(mvm,
1729 "Decrease rate because of low SR\n"); 1730 "Decrease rate because of low SR\n");
1730 return RS_ACTION_DOWNSCALE; 1731 return RS_ACTION_DOWNSCALE;
@@ -1783,7 +1784,7 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
1783 1784
1784out: 1785out:
1785 if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) { 1786 if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) {
1786 if (sr >= RS_SR_NO_DECREASE) { 1787 if (sr >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
1787 IWL_DEBUG_RATE(mvm, 1788 IWL_DEBUG_RATE(mvm,
1788 "SR is above NO DECREASE. Avoid downscale\n"); 1789 "SR is above NO DECREASE. Avoid downscale\n");
1789 action = RS_ACTION_STAY; 1790 action = RS_ACTION_STAY;
@@ -1802,18 +1803,10 @@ out:
1802static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 1803static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1803 struct iwl_lq_sta *lq_sta) 1804 struct iwl_lq_sta *lq_sta)
1804{ 1805{
1805 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1806 struct ieee80211_vif *vif = mvmsta->vif;
1807 bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
1808 !vif->bss_conf.ps);
1809
1810 /* Our chip supports Tx STBC and the peer is an HT/VHT STA which 1806 /* Our chip supports Tx STBC and the peer is an HT/VHT STA which
1811 * supports STBC of at least 1*SS 1807 * supports STBC of at least 1*SS
1812 */ 1808 */
1813 if (!lq_sta->stbc) 1809 if (!lq_sta->stbc_capable)
1814 return false;
1815
1816 if (!mvm->ps_disabled && !sta_ps_disabled)
1817 return false; 1810 return false;
1818 1811
1819 if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) 1812 if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
@@ -1825,11 +1818,11 @@ static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1825static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index, 1818static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
1826 int *weaker, int *stronger) 1819 int *weaker, int *stronger)
1827{ 1820{
1828 *weaker = index + TPC_TX_POWER_STEP; 1821 *weaker = index + IWL_MVM_RS_TPC_TX_POWER_STEP;
1829 if (*weaker > TPC_MAX_REDUCTION) 1822 if (*weaker > TPC_MAX_REDUCTION)
1830 *weaker = TPC_INVALID; 1823 *weaker = TPC_INVALID;
1831 1824
1832 *stronger = index - TPC_TX_POWER_STEP; 1825 *stronger = index - IWL_MVM_RS_TPC_TX_POWER_STEP;
1833 if (*stronger < 0) 1826 if (*stronger < 0)
1834 *stronger = TPC_INVALID; 1827 *stronger = TPC_INVALID;
1835} 1828}
@@ -1885,7 +1878,8 @@ static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
1885 } 1878 }
1886 1879
1887 /* Too many failures, increase txp */ 1880 /* Too many failures, increase txp */
1888 if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) { 1881 if (sr <= RS_PERCENT(IWL_MVM_RS_TPC_SR_FORCE_INCREASE) ||
1882 current_tpt == 0) {
1889 IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n"); 1883 IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
1890 return TPC_ACTION_NO_RESTIRCTION; 1884 return TPC_ACTION_NO_RESTIRCTION;
1891 } 1885 }
@@ -1908,7 +1902,8 @@ static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
1908 } 1902 }
1909 1903
1910 /* next, increase if needed */ 1904 /* next, increase if needed */
1911 if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) { 1905 if (sr < RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) &&
1906 strong != TPC_INVALID) {
1912 if (weak_tpt == IWL_INVALID_VALUE && 1907 if (weak_tpt == IWL_INVALID_VALUE &&
1913 strong_tpt != IWL_INVALID_VALUE && 1908 strong_tpt != IWL_INVALID_VALUE &&
1914 current_tpt < strong_tpt) { 1909 current_tpt < strong_tpt) {
@@ -1935,7 +1930,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
1935 struct iwl_lq_sta *lq_sta, 1930 struct iwl_lq_sta *lq_sta,
1936 struct iwl_scale_tbl_info *tbl) 1931 struct iwl_scale_tbl_info *tbl)
1937{ 1932{
1938 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 1933 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1939 struct ieee80211_vif *vif = mvm_sta->vif; 1934 struct ieee80211_vif *vif = mvm_sta->vif;
1940 struct ieee80211_chanctx_conf *chanctx_conf; 1935 struct ieee80211_chanctx_conf *chanctx_conf;
1941 enum ieee80211_band band; 1936 enum ieee80211_band band;
@@ -2044,7 +2039,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
2044 u16 high_low; 2039 u16 high_low;
2045 s32 sr; 2040 s32 sr;
2046 u8 prev_agg = lq_sta->is_agg; 2041 u8 prev_agg = lq_sta->is_agg;
2047 struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv; 2042 struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
2048 struct iwl_mvm_tid_data *tid_data; 2043 struct iwl_mvm_tid_data *tid_data;
2049 struct rs_rate *rate; 2044 struct rs_rate *rate;
2050 2045
@@ -2106,8 +2101,8 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
2106 * in current association (use new rate found above). 2101 * in current association (use new rate found above).
2107 */ 2102 */
2108 fail_count = window->counter - window->success_counter; 2103 fail_count = window->counter - window->success_counter;
2109 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) && 2104 if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) &&
2110 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) { 2105 (window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) {
2111 IWL_DEBUG_RATE(mvm, 2106 IWL_DEBUG_RATE(mvm,
2112 "(%s: %d): Test Window: succ %d total %d\n", 2107 "(%s: %d): Test Window: succ %d total %d\n",
2113 rs_pretty_lq_type(rate->type), 2108 rs_pretty_lq_type(rate->type),
@@ -2385,7 +2380,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
2385 int i, nentries; 2380 int i, nentries;
2386 s8 best_rssi = S8_MIN; 2381 s8 best_rssi = S8_MIN;
2387 u8 best_ant = ANT_NONE; 2382 u8 best_ant = ANT_NONE;
2388 u8 valid_tx_ant = mvm->fw->valid_tx_ant; 2383 u8 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
2389 const struct rs_init_rate_info *initial_rates; 2384 const struct rs_init_rate_info *initial_rates;
2390 2385
2391 for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) { 2386 for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
@@ -2530,7 +2525,7 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
2530static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, 2525static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
2531 gfp_t gfp) 2526 gfp_t gfp)
2532{ 2527{
2533 struct iwl_mvm_sta *sta_priv = (struct iwl_mvm_sta *)sta->drv_priv; 2528 struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
2534 struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate; 2529 struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
2535 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 2530 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2536 struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta; 2531 struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta;
@@ -2606,68 +2601,121 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
2606 } 2601 }
2607} 2602}
2608 2603
2604static void rs_ht_init(struct iwl_mvm *mvm,
2605 struct ieee80211_sta *sta,
2606 struct iwl_lq_sta *lq_sta,
2607 struct ieee80211_sta_ht_cap *ht_cap)
2608{
2609 /* active_siso_rate mask includes 9 MBits (bit 5),
2610 * and CCK (bits 0-3), supp_rates[] does not;
2611 * shift to convert format, force 9 MBits off.
2612 */
2613 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2614 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2615 lq_sta->active_siso_rate &= ~((u16)0x2);
2616 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2617
2618 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2619 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2620 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2621 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2622
2623 if (mvm->cfg->ht_params->ldpc &&
2624 (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
2625 lq_sta->ldpc = true;
2626
2627 if (mvm->cfg->ht_params->stbc &&
2628 (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
2629 (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
2630 lq_sta->stbc_capable = true;
2631
2632 lq_sta->is_vht = false;
2633}
2634
2635static void rs_vht_init(struct iwl_mvm *mvm,
2636 struct ieee80211_sta *sta,
2637 struct iwl_lq_sta *lq_sta,
2638 struct ieee80211_sta_vht_cap *vht_cap)
2639{
2640 rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
2641
2642 if (mvm->cfg->ht_params->ldpc &&
2643 (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
2644 lq_sta->ldpc = true;
2645
2646 if (mvm->cfg->ht_params->stbc &&
2647 (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
2648 (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
2649 lq_sta->stbc_capable = true;
2650
2651 if ((mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
2652 (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
2653 (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
2654 lq_sta->bfer_capable = true;
2655
2656 lq_sta->is_vht = true;
2657}
2658
2609#ifdef CONFIG_IWLWIFI_DEBUGFS 2659#ifdef CONFIG_IWLWIFI_DEBUGFS
2610static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm, 2660static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
2611 struct iwl_mvm_frame_stats *stats)
2612{ 2661{
2613 spin_lock_bh(&mvm->drv_stats_lock); 2662 spin_lock_bh(&mvm->drv_stats_lock);
2614 memset(stats, 0, sizeof(*stats)); 2663 memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats));
2615 spin_unlock_bh(&mvm->drv_stats_lock); 2664 spin_unlock_bh(&mvm->drv_stats_lock);
2616} 2665}
2617 2666
2618void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, 2667void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
2619 struct iwl_mvm_frame_stats *stats,
2620 u32 rate, bool agg)
2621{ 2668{
2622 u8 nss = 0, mcs = 0; 2669 u8 nss = 0, mcs = 0;
2623 2670
2624 spin_lock(&mvm->drv_stats_lock); 2671 spin_lock(&mvm->drv_stats_lock);
2625 2672
2626 if (agg) 2673 if (agg)
2627 stats->agg_frames++; 2674 mvm->drv_rx_stats.agg_frames++;
2628 2675
2629 stats->success_frames++; 2676 mvm->drv_rx_stats.success_frames++;
2630 2677
2631 switch (rate & RATE_MCS_CHAN_WIDTH_MSK) { 2678 switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
2632 case RATE_MCS_CHAN_WIDTH_20: 2679 case RATE_MCS_CHAN_WIDTH_20:
2633 stats->bw_20_frames++; 2680 mvm->drv_rx_stats.bw_20_frames++;
2634 break; 2681 break;
2635 case RATE_MCS_CHAN_WIDTH_40: 2682 case RATE_MCS_CHAN_WIDTH_40:
2636 stats->bw_40_frames++; 2683 mvm->drv_rx_stats.bw_40_frames++;
2637 break; 2684 break;
2638 case RATE_MCS_CHAN_WIDTH_80: 2685 case RATE_MCS_CHAN_WIDTH_80:
2639 stats->bw_80_frames++; 2686 mvm->drv_rx_stats.bw_80_frames++;
2640 break; 2687 break;
2641 default: 2688 default:
2642 WARN_ONCE(1, "bad BW. rate 0x%x", rate); 2689 WARN_ONCE(1, "bad BW. rate 0x%x", rate);
2643 } 2690 }
2644 2691
2645 if (rate & RATE_MCS_HT_MSK) { 2692 if (rate & RATE_MCS_HT_MSK) {
2646 stats->ht_frames++; 2693 mvm->drv_rx_stats.ht_frames++;
2647 mcs = rate & RATE_HT_MCS_RATE_CODE_MSK; 2694 mcs = rate & RATE_HT_MCS_RATE_CODE_MSK;
2648 nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1; 2695 nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
2649 } else if (rate & RATE_MCS_VHT_MSK) { 2696 } else if (rate & RATE_MCS_VHT_MSK) {
2650 stats->vht_frames++; 2697 mvm->drv_rx_stats.vht_frames++;
2651 mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; 2698 mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
2652 nss = ((rate & RATE_VHT_MCS_NSS_MSK) >> 2699 nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
2653 RATE_VHT_MCS_NSS_POS) + 1; 2700 RATE_VHT_MCS_NSS_POS) + 1;
2654 } else { 2701 } else {
2655 stats->legacy_frames++; 2702 mvm->drv_rx_stats.legacy_frames++;
2656 } 2703 }
2657 2704
2658 if (nss == 1) 2705 if (nss == 1)
2659 stats->siso_frames++; 2706 mvm->drv_rx_stats.siso_frames++;
2660 else if (nss == 2) 2707 else if (nss == 2)
2661 stats->mimo2_frames++; 2708 mvm->drv_rx_stats.mimo2_frames++;
2662 2709
2663 if (rate & RATE_MCS_SGI_MSK) 2710 if (rate & RATE_MCS_SGI_MSK)
2664 stats->sgi_frames++; 2711 mvm->drv_rx_stats.sgi_frames++;
2665 else 2712 else
2666 stats->ngi_frames++; 2713 mvm->drv_rx_stats.ngi_frames++;
2667 2714
2668 stats->last_rates[stats->last_frame_idx] = rate; 2715 mvm->drv_rx_stats.last_rates[mvm->drv_rx_stats.last_frame_idx] = rate;
2669 stats->last_frame_idx = (stats->last_frame_idx + 1) % 2716 mvm->drv_rx_stats.last_frame_idx =
2670 ARRAY_SIZE(stats->last_rates); 2717 (mvm->drv_rx_stats.last_frame_idx + 1) %
2718 ARRAY_SIZE(mvm->drv_rx_stats.last_rates);
2671 2719
2672 spin_unlock(&mvm->drv_stats_lock); 2720 spin_unlock(&mvm->drv_stats_lock);
2673} 2721}
@@ -2683,14 +2731,11 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2683 struct ieee80211_hw *hw = mvm->hw; 2731 struct ieee80211_hw *hw = mvm->hw;
2684 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2732 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2685 struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; 2733 struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
2686 struct iwl_mvm_sta *sta_priv; 2734 struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
2687 struct iwl_lq_sta *lq_sta; 2735 struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta;
2688 struct ieee80211_supported_band *sband; 2736 struct ieee80211_supported_band *sband;
2689 unsigned long supp; /* must be unsigned long for for_each_set_bit */ 2737 unsigned long supp; /* must be unsigned long for for_each_set_bit */
2690 2738
2691 sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
2692 lq_sta = &sta_priv->lq_sta;
2693
2694 /* clear all non-persistent lq data */ 2739 /* clear all non-persistent lq data */
2695 memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers)); 2740 memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
2696 2741
@@ -2712,7 +2757,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2712 * previous packets? Need to have IEEE 802.1X auth succeed immediately 2757 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2713 * after assoc.. */ 2758 * after assoc.. */
2714 2759
2715 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; 2760 lq_sta->missed_rate_counter = IWL_MVM_RS_MISSED_RATE_MAX;
2716 lq_sta->band = sband->band; 2761 lq_sta->band = sband->band;
2717 /* 2762 /*
2718 * active legacy rates as per supported rates bitmap 2763 * active legacy rates as per supported rates bitmap
@@ -2723,61 +2768,28 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2723 lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value); 2768 lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
2724 2769
2725 /* TODO: should probably account for rx_highest for both HT/VHT */ 2770 /* TODO: should probably account for rx_highest for both HT/VHT */
2726 if (!vht_cap || !vht_cap->vht_supported) { 2771 if (!vht_cap || !vht_cap->vht_supported)
2727 /* active_siso_rate mask includes 9 MBits (bit 5), 2772 rs_ht_init(mvm, sta, lq_sta, ht_cap);
2728 * and CCK (bits 0-3), supp_rates[] does not; 2773 else
2729 * shift to convert format, force 9 MBits off. 2774 rs_vht_init(mvm, sta, lq_sta, vht_cap);
2730 */
2731 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2732 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2733 lq_sta->active_siso_rate &= ~((u16)0x2);
2734 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2735
2736 /* Same here */
2737 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2738 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2739 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2740 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2741
2742 lq_sta->is_vht = false;
2743 if (mvm->cfg->ht_params->ldpc &&
2744 (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
2745 lq_sta->ldpc = true;
2746
2747 if (mvm->cfg->ht_params->stbc &&
2748 (num_of_ant(mvm->fw->valid_tx_ant) > 1) &&
2749 (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
2750 lq_sta->stbc = true;
2751 } else {
2752 rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
2753 lq_sta->is_vht = true;
2754
2755 if (mvm->cfg->ht_params->ldpc &&
2756 (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
2757 lq_sta->ldpc = true;
2758
2759 if (mvm->cfg->ht_params->stbc &&
2760 (num_of_ant(mvm->fw->valid_tx_ant) > 1) &&
2761 (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
2762 lq_sta->stbc = true;
2763 }
2764 2775
2765 if (IWL_MVM_RS_DISABLE_MIMO) 2776 if (IWL_MVM_RS_DISABLE_P2P_MIMO && sta_priv->vif->p2p)
2766 lq_sta->active_mimo2_rate = 0; 2777 lq_sta->active_mimo2_rate = 0;
2767 2778
2768 lq_sta->max_legacy_rate_idx = find_last_bit(&lq_sta->active_legacy_rate, 2779 lq_sta->max_legacy_rate_idx =
2769 BITS_PER_LONG); 2780 rs_get_max_rate_from_mask(lq_sta->active_legacy_rate);
2770 lq_sta->max_siso_rate_idx = find_last_bit(&lq_sta->active_siso_rate, 2781 lq_sta->max_siso_rate_idx =
2771 BITS_PER_LONG); 2782 rs_get_max_rate_from_mask(lq_sta->active_siso_rate);
2772 lq_sta->max_mimo2_rate_idx = find_last_bit(&lq_sta->active_mimo2_rate, 2783 lq_sta->max_mimo2_rate_idx =
2773 BITS_PER_LONG); 2784 rs_get_max_rate_from_mask(lq_sta->active_mimo2_rate);
2774 2785
2775 IWL_DEBUG_RATE(mvm, 2786 IWL_DEBUG_RATE(mvm,
2776 "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC%d\n", 2787 "LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC=%d BFER=%d\n",
2777 lq_sta->active_legacy_rate, 2788 lq_sta->active_legacy_rate,
2778 lq_sta->active_siso_rate, 2789 lq_sta->active_siso_rate,
2779 lq_sta->active_mimo2_rate, 2790 lq_sta->active_mimo2_rate,
2780 lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc); 2791 lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc_capable,
2792 lq_sta->bfer_capable);
2781 IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n", 2793 IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n",
2782 lq_sta->max_legacy_rate_idx, 2794 lq_sta->max_legacy_rate_idx,
2783 lq_sta->max_siso_rate_idx, 2795 lq_sta->max_siso_rate_idx,
@@ -2785,14 +2797,14 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2785 2797
2786 /* These values will be overridden later */ 2798 /* These values will be overridden later */
2787 lq_sta->lq.single_stream_ant_msk = 2799 lq_sta->lq.single_stream_ant_msk =
2788 first_antenna(mvm->fw->valid_tx_ant); 2800 first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
2789 lq_sta->lq.dual_stream_ant_msk = ANT_AB; 2801 lq_sta->lq.dual_stream_ant_msk = ANT_AB;
2790 2802
2791 /* as default allow aggregation for all tids */ 2803 /* as default allow aggregation for all tids */
2792 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; 2804 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2793 lq_sta->is_agg = 0; 2805 lq_sta->is_agg = 0;
2794#ifdef CONFIG_IWLWIFI_DEBUGFS 2806#ifdef CONFIG_IWLWIFI_DEBUGFS
2795 iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats); 2807 iwl_mvm_reset_frame_stats(mvm);
2796#endif 2808#endif
2797 rs_initialize_lq(mvm, sta, lq_sta, band, init); 2809 rs_initialize_lq(mvm, sta, lq_sta, band, init);
2798} 2810}
@@ -2861,12 +2873,13 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
2861 int index = *rs_table_index; 2873 int index = *rs_table_index;
2862 2874
2863 for (i = 0; i < num_rates && index < end; i++) { 2875 for (i = 0; i < num_rates && index < end; i++) {
2864 ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm, rate)); 2876 for (j = 0; j < num_retries && index < end; j++, index++) {
2865 for (j = 0; j < num_retries && index < end; j++, index++) 2877 ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm,
2878 rate));
2866 rs_table[index] = ucode_rate; 2879 rs_table[index] = ucode_rate;
2867 2880 if (toggle_ant)
2868 if (toggle_ant) 2881 rs_toggle_antenna(valid_tx_ant, rate);
2869 rs_toggle_antenna(valid_tx_ant, rate); 2882 }
2870 2883
2871 prev_rate_idx = rate->index; 2884 prev_rate_idx = rate->index;
2872 bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate); 2885 bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate);
@@ -2874,7 +2887,7 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
2874 break; 2887 break;
2875 } 2888 }
2876 2889
2877 if (!bottom_reached) 2890 if (!bottom_reached && !is_legacy(rate))
2878 rate->index = prev_rate_idx; 2891 rate->index = prev_rate_idx;
2879 2892
2880 *rs_table_index = index; 2893 *rs_table_index = index;
@@ -2913,18 +2926,22 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
2913 2926
2914 memcpy(&rate, initial_rate, sizeof(rate)); 2927 memcpy(&rate, initial_rate, sizeof(rate));
2915 2928
2916 valid_tx_ant = mvm->fw->valid_tx_ant; 2929 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
2917 rate.stbc = rs_stbc_allow(mvm, sta, lq_sta); 2930
2931 /* TODO: remove old API when min FW API hits 14 */
2932 if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
2933 rs_stbc_allow(mvm, sta, lq_sta))
2934 rate.stbc = true;
2918 2935
2919 if (is_siso(&rate)) { 2936 if (is_siso(&rate)) {
2920 num_rates = RS_INITIAL_SISO_NUM_RATES; 2937 num_rates = IWL_MVM_RS_INITIAL_SISO_NUM_RATES;
2921 num_retries = RS_HT_VHT_RETRIES_PER_RATE; 2938 num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
2922 } else if (is_mimo(&rate)) { 2939 } else if (is_mimo(&rate)) {
2923 num_rates = RS_INITIAL_MIMO_NUM_RATES; 2940 num_rates = IWL_MVM_RS_INITIAL_MIMO_NUM_RATES;
2924 num_retries = RS_HT_VHT_RETRIES_PER_RATE; 2941 num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
2925 } else { 2942 } else {
2926 num_rates = RS_INITIAL_LEGACY_NUM_RATES; 2943 num_rates = IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES;
2927 num_retries = RS_LEGACY_RETRIES_PER_RATE; 2944 num_retries = IWL_MVM_RS_INITIAL_LEGACY_RETRIES;
2928 toggle_ant = true; 2945 toggle_ant = true;
2929 } 2946 }
2930 2947
@@ -2935,12 +2952,12 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
2935 rs_get_lower_rate_down_column(lq_sta, &rate); 2952 rs_get_lower_rate_down_column(lq_sta, &rate);
2936 2953
2937 if (is_siso(&rate)) { 2954 if (is_siso(&rate)) {
2938 num_rates = RS_SECONDARY_SISO_NUM_RATES; 2955 num_rates = IWL_MVM_RS_SECONDARY_SISO_NUM_RATES;
2939 num_retries = RS_SECONDARY_SISO_RETRIES; 2956 num_retries = IWL_MVM_RS_SECONDARY_SISO_RETRIES;
2940 lq_cmd->mimo_delim = index; 2957 lq_cmd->mimo_delim = index;
2941 } else if (is_legacy(&rate)) { 2958 } else if (is_legacy(&rate)) {
2942 num_rates = RS_SECONDARY_LEGACY_NUM_RATES; 2959 num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
2943 num_retries = RS_LEGACY_RETRIES_PER_RATE; 2960 num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
2944 } else { 2961 } else {
2945 WARN_ON_ONCE(1); 2962 WARN_ON_ONCE(1);
2946 } 2963 }
@@ -2953,8 +2970,8 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
2953 2970
2954 rs_get_lower_rate_down_column(lq_sta, &rate); 2971 rs_get_lower_rate_down_column(lq_sta, &rate);
2955 2972
2956 num_rates = RS_SECONDARY_LEGACY_NUM_RATES; 2973 num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
2957 num_retries = RS_LEGACY_RETRIES_PER_RATE; 2974 num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
2958 2975
2959 rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index, 2976 rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
2960 num_rates, num_retries, valid_tx_ant, 2977 num_rates, num_retries, valid_tx_ant,
@@ -2962,6 +2979,142 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
2962 2979
2963} 2980}
2964 2981
2982struct rs_bfer_active_iter_data {
2983 struct ieee80211_sta *exclude_sta;
2984 struct iwl_mvm_sta *bfer_mvmsta;
2985};
2986
2987static void rs_bfer_active_iter(void *_data,
2988 struct ieee80211_sta *sta)
2989{
2990 struct rs_bfer_active_iter_data *data = _data;
2991 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2992 struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.lq;
2993 u32 ss_params = le32_to_cpu(lq_cmd->ss_params);
2994
2995 if (sta == data->exclude_sta)
2996 return;
2997
2998 /* The current sta has BFER allowed */
2999 if (ss_params & LQ_SS_BFER_ALLOWED) {
3000 WARN_ON_ONCE(data->bfer_mvmsta != NULL);
3001
3002 data->bfer_mvmsta = mvmsta;
3003 }
3004}
3005
3006static int rs_bfer_priority(struct iwl_mvm_sta *sta)
3007{
3008 int prio = -1;
3009 enum nl80211_iftype viftype = ieee80211_vif_type_p2p(sta->vif);
3010
3011 switch (viftype) {
3012 case NL80211_IFTYPE_AP:
3013 case NL80211_IFTYPE_P2P_GO:
3014 prio = 3;
3015 break;
3016 case NL80211_IFTYPE_P2P_CLIENT:
3017 prio = 2;
3018 break;
3019 case NL80211_IFTYPE_STATION:
3020 prio = 1;
3021 break;
3022 default:
3023 WARN_ONCE(true, "viftype %d sta_id %d", viftype, sta->sta_id);
3024 prio = -1;
3025 }
3026
3027 return prio;
3028}
3029
3030/* Returns >0 if sta1 has a higher BFER priority compared to sta2 */
3031static int rs_bfer_priority_cmp(struct iwl_mvm_sta *sta1,
3032 struct iwl_mvm_sta *sta2)
3033{
3034 int prio1 = rs_bfer_priority(sta1);
3035 int prio2 = rs_bfer_priority(sta2);
3036
3037 if (prio1 > prio2)
3038 return 1;
3039 if (prio1 < prio2)
3040 return -1;
3041 return 0;
3042}
3043
3044static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
3045 struct ieee80211_sta *sta,
3046 struct iwl_lq_sta *lq_sta,
3047 const struct rs_rate *initial_rate)
3048{
3049 struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
3050 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3051 struct rs_bfer_active_iter_data data = {
3052 .exclude_sta = sta,
3053 .bfer_mvmsta = NULL,
3054 };
3055 struct iwl_mvm_sta *bfer_mvmsta = NULL;
3056 u32 ss_params = LQ_SS_PARAMS_VALID;
3057
3058 if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
3059 goto out;
3060
3061 /* Check if forcing the decision is configured.
3062 * Note that SISO is forced by not allowing STBC or BFER
3063 */
3064 if (lq_sta->ss_force == RS_SS_FORCE_STBC)
3065 ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE);
3066 else if (lq_sta->ss_force == RS_SS_FORCE_BFER)
3067 ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE);
3068
3069 if (lq_sta->ss_force != RS_SS_FORCE_NONE) {
3070 IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n",
3071 lq_sta->ss_force);
3072 goto out;
3073 }
3074
3075 if (lq_sta->stbc_capable)
3076 ss_params |= LQ_SS_STBC_1SS_ALLOWED;
3077
3078 if (!lq_sta->bfer_capable)
3079 goto out;
3080
3081 ieee80211_iterate_stations_atomic(mvm->hw,
3082 rs_bfer_active_iter,
3083 &data);
3084 bfer_mvmsta = data.bfer_mvmsta;
3085
3086 /* This code is safe as it doesn't run concurrently for different
3087 * stations. This is guaranteed by the fact that calls to
3088 * ieee80211_tx_status wouldn't run concurrently for a single HW.
3089 */
3090 if (!bfer_mvmsta) {
3091 IWL_DEBUG_RATE(mvm, "No sta with BFER allowed found. Allow\n");
3092
3093 ss_params |= LQ_SS_BFER_ALLOWED;
3094 goto out;
3095 }
3096
3097 IWL_DEBUG_RATE(mvm, "Found existing sta %d with BFER activated\n",
3098 bfer_mvmsta->sta_id);
3099
3100 /* Disallow BFER on another STA if active and we're a higher priority */
3101 if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) {
3102 struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.lq;
3103 u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params);
3104
3105 bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
3106 bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params);
3107 iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false);
3108
3109 ss_params |= LQ_SS_BFER_ALLOWED;
3110 IWL_DEBUG_RATE(mvm,
3111 "Lower priority BFER sta found (%d). Switch BFER\n",
3112 bfer_mvmsta->sta_id);
3113 }
3114out:
3115 lq_cmd->ss_params = cpu_to_le32(ss_params);
3116}
3117
2965static void rs_fill_lq_cmd(struct iwl_mvm *mvm, 3118static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
2966 struct ieee80211_sta *sta, 3119 struct ieee80211_sta *sta,
2967 struct iwl_lq_sta *lq_sta, 3120 struct iwl_lq_sta *lq_sta,
@@ -2971,9 +3124,9 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
2971 struct iwl_mvm_sta *mvmsta; 3124 struct iwl_mvm_sta *mvmsta;
2972 struct iwl_mvm_vif *mvmvif; 3125 struct iwl_mvm_vif *mvmvif;
2973 3126
2974 lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; 3127 lq_cmd->agg_disable_start_th = IWL_MVM_RS_AGG_DISABLE_START;
2975 lq_cmd->agg_time_limit = 3128 lq_cmd->agg_time_limit =
2976 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); 3129 cpu_to_le16(IWL_MVM_RS_AGG_TIME_LIMIT);
2977 3130
2978#ifdef CONFIG_MAC80211_DEBUGFS 3131#ifdef CONFIG_MAC80211_DEBUGFS
2979 if (lq_sta->pers.dbg_fixed_rate) { 3132 if (lq_sta->pers.dbg_fixed_rate) {
@@ -2988,6 +3141,9 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
2988 3141
2989 rs_build_rates_table(mvm, sta, lq_sta, initial_rate); 3142 rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
2990 3143
3144 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS)
3145 rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);
3146
2991 if (num_of_ant(initial_rate->ant) == 1) 3147 if (num_of_ant(initial_rate->ant) == 1)
2992 lq_cmd->single_stream_ant_msk = initial_rate->ant; 3148 lq_cmd->single_stream_ant_msk = initial_rate->ant;
2993 3149
@@ -3167,9 +3323,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3167 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 3323 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
3168 lq_sta->pers.dbg_fixed_rate); 3324 lq_sta->pers.dbg_fixed_rate);
3169 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", 3325 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
3170 (mvm->fw->valid_tx_ant & ANT_A) ? "ANT_A," : "", 3326 (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
3171 (mvm->fw->valid_tx_ant & ANT_B) ? "ANT_B," : "", 3327 (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
3172 (mvm->fw->valid_tx_ant & ANT_C) ? "ANT_C" : ""); 3328 (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
3173 desc += sprintf(buff+desc, "lq type %s\n", 3329 desc += sprintf(buff+desc, "lq type %s\n",
3174 (is_legacy(rate)) ? "legacy" : 3330 (is_legacy(rate)) ? "legacy" :
3175 is_vht(rate) ? "VHT" : "HT"); 3331 is_vht(rate) ? "VHT" : "HT");
@@ -3361,9 +3517,73 @@ static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
3361 .llseek = default_llseek, 3517 .llseek = default_llseek,
3362}; 3518};
3363 3519
3520static ssize_t iwl_dbgfs_ss_force_read(struct file *file,
3521 char __user *user_buf,
3522 size_t count, loff_t *ppos)
3523{
3524 struct iwl_lq_sta *lq_sta = file->private_data;
3525 char buf[12];
3526 int bufsz = sizeof(buf);
3527 int pos = 0;
3528 static const char * const ss_force_name[] = {
3529 [RS_SS_FORCE_NONE] = "none",
3530 [RS_SS_FORCE_STBC] = "stbc",
3531 [RS_SS_FORCE_BFER] = "bfer",
3532 [RS_SS_FORCE_SISO] = "siso",
3533 };
3534
3535 pos += scnprintf(buf+pos, bufsz-pos, "%s\n",
3536 ss_force_name[lq_sta->ss_force]);
3537 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
3538}
3539
3540static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
3541 size_t count, loff_t *ppos)
3542{
3543 struct iwl_mvm *mvm = lq_sta->pers.drv;
3544 int ret = 0;
3545
3546 if (!strncmp("none", buf, 4)) {
3547 lq_sta->ss_force = RS_SS_FORCE_NONE;
3548 } else if (!strncmp("siso", buf, 4)) {
3549 lq_sta->ss_force = RS_SS_FORCE_SISO;
3550 } else if (!strncmp("stbc", buf, 4)) {
3551 if (lq_sta->stbc_capable) {
3552 lq_sta->ss_force = RS_SS_FORCE_STBC;
3553 } else {
3554 IWL_ERR(mvm,
3555 "can't force STBC. peer doesn't support\n");
3556 ret = -EINVAL;
3557 }
3558 } else if (!strncmp("bfer", buf, 4)) {
3559 if (lq_sta->bfer_capable) {
3560 lq_sta->ss_force = RS_SS_FORCE_BFER;
3561 } else {
3562 IWL_ERR(mvm,
3563 "can't force BFER. peer doesn't support\n");
3564 ret = -EINVAL;
3565 }
3566 } else {
3567 IWL_ERR(mvm, "valid values none|siso|stbc|bfer\n");
3568 ret = -EINVAL;
3569 }
3570 return ret ?: count;
3571}
3572
3573#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
3574 _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta)
3575#define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do { \
3576 if (!debugfs_create_file(#name, mode, parent, lq_sta, \
3577 &iwl_dbgfs_##name##_ops)) \
3578 goto err; \
3579 } while (0)
3580
3581MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
3582
3364static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir) 3583static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
3365{ 3584{
3366 struct iwl_lq_sta *lq_sta = mvm_sta; 3585 struct iwl_lq_sta *lq_sta = mvm_sta;
3586
3367 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, 3587 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
3368 lq_sta, &rs_sta_dbgfs_scale_table_ops); 3588 lq_sta, &rs_sta_dbgfs_scale_table_ops);
3369 debugfs_create_file("rate_stats_table", S_IRUSR, dir, 3589 debugfs_create_file("rate_stats_table", S_IRUSR, dir,
@@ -3374,6 +3594,11 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
3374 &lq_sta->tx_agg_tid_en); 3594 &lq_sta->tx_agg_tid_en);
3375 debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir, 3595 debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
3376 &lq_sta->pers.dbg_fixed_txp_reduction); 3596 &lq_sta->pers.dbg_fixed_txp_reduction);
3597
3598 MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, S_IRUSR | S_IWUSR);
3599 return;
3600err:
3601 IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
3377} 3602}
3378 3603
3379static void rs_remove_debugfs(void *mvm, void *mvm_sta) 3604static void rs_remove_debugfs(void *mvm, void *mvm_sta)
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index defd70a6d9e6..dc4ef3dfafe1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -137,42 +137,10 @@ enum {
137 137
138#define IWL_INVALID_VALUE -1 138#define IWL_INVALID_VALUE -1
139 139
140#define IWL_MIN_RSSI_VAL -100
141#define IWL_MAX_RSSI_VAL 0
142
143/* These values specify how many Tx frame attempts before
144 * searching for a new modulation mode */
145#define IWL_LEGACY_FAILURE_LIMIT 160
146#define IWL_LEGACY_SUCCESS_LIMIT 480
147#define IWL_LEGACY_TABLE_COUNT 160
148
149#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
150#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
151#define IWL_NONE_LEGACY_TABLE_COUNT 1500
152
153/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
154#define IWL_RS_GOOD_RATIO 12800 /* 100% */
155#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
156#define IWL_RATE_HIGH_TH 10880 /* 85% */
157#define IWL_RATE_INCREASE_TH 6400 /* 50% */
158#define RS_SR_FORCE_DECREASE 1920 /* 15% */
159#define RS_SR_NO_DECREASE 10880 /* 85% */
160
161#define TPC_SR_FORCE_INCREASE 9600 /* 75% */
162#define TPC_SR_NO_INCREASE 10880 /* 85% */
163#define TPC_TX_POWER_STEP 3
164#define TPC_MAX_REDUCTION 15 140#define TPC_MAX_REDUCTION 15
165#define TPC_NO_REDUCTION 0 141#define TPC_NO_REDUCTION 0
166#define TPC_INVALID 0xff 142#define TPC_INVALID 0xff
167 143
168#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
169#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
170#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
171
172#define LINK_QUAL_AGG_DISABLE_START_DEF (3)
173#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
174#define LINK_QUAL_AGG_DISABLE_START_MIN (0)
175
176#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63) 144#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
177#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) 145#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
178#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) 146#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
@@ -181,14 +149,7 @@ enum {
181 149
182/* load per tid defines for A-MPDU activation */ 150/* load per tid defines for A-MPDU activation */
183#define IWL_AGG_TPT_THREHOLD 0 151#define IWL_AGG_TPT_THREHOLD 0
184#define IWL_AGG_LOAD_THRESHOLD 10
185#define IWL_AGG_ALL_TID 0xff 152#define IWL_AGG_ALL_TID 0xff
186#define TID_QUEUE_CELL_SPACING 50 /*mS */
187#define TID_QUEUE_MAX_SIZE 20
188#define TID_ROUND_VALUE 5 /* mS */
189
190#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
191#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
192 153
193enum iwl_table_type { 154enum iwl_table_type {
194 LQ_NONE, 155 LQ_NONE,
@@ -279,6 +240,13 @@ enum rs_column {
279 RS_COLUMN_INVALID, 240 RS_COLUMN_INVALID,
280}; 241};
281 242
243enum rs_ss_force_opt {
244 RS_SS_FORCE_NONE = 0,
245 RS_SS_FORCE_STBC,
246 RS_SS_FORCE_BFER,
247 RS_SS_FORCE_SISO,
248};
249
282/* Packet stats per rate */ 250/* Packet stats per rate */
283struct rs_rate_stats { 251struct rs_rate_stats {
284 u64 success; 252 u64 success;
@@ -332,7 +300,9 @@ struct iwl_lq_sta {
332 u64 last_tx; 300 u64 last_tx;
333 bool is_vht; 301 bool is_vht;
334 bool ldpc; /* LDPC Rx is supported by the STA */ 302 bool ldpc; /* LDPC Rx is supported by the STA */
335 bool stbc; /* Tx STBC is supported by chip and Rx by STA */ 303 bool stbc_capable; /* Tx STBC is supported by chip and Rx by STA */
304 bool bfer_capable; /* Remote supports beamformee and we BFer */
305
336 enum ieee80211_band band; 306 enum ieee80211_band band;
337 307
338 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ 308 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
@@ -361,6 +331,9 @@ struct iwl_lq_sta {
361 /* tx power reduce for this sta */ 331 /* tx power reduce for this sta */
362 int tpc_reduce; 332 int tpc_reduce;
363 333
334 /* force STBC/BFER/SISO for testing */
335 enum rs_ss_force_opt ss_force;
336
364 /* persistent fields - initialized only once - keep last! */ 337 /* persistent fields - initialized only once - keep last! */
365 struct lq_sta_pers { 338 struct lq_sta_pers {
366#ifdef CONFIG_MAC80211_DEBUGFS 339#ifdef CONFIG_MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 94b6e7297a1e..f922131b4eab 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -407,7 +407,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
407 } 407 }
408 408
409#ifdef CONFIG_IWLWIFI_DEBUGFS 409#ifdef CONFIG_IWLWIFI_DEBUGFS
410 iwl_mvm_update_frame_stats(mvm, &mvm->drv_rx_stats, rate_n_flags, 410 iwl_mvm_update_frame_stats(mvm, rate_n_flags,
411 rx_status->flag & RX_FLAG_AMPDU_DETAILS); 411 rx_status->flag & RX_FLAG_AMPDU_DETAILS);
412#endif 412#endif
413 iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status, 413 iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
@@ -511,13 +511,17 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
511{ 511{
512 struct iwl_rx_packet *pkt = rxb_addr(rxb); 512 struct iwl_rx_packet *pkt = rxb_addr(rxb);
513 struct iwl_notif_statistics *stats = (void *)&pkt->data; 513 struct iwl_notif_statistics *stats = (void *)&pkt->data;
514 struct mvm_statistics_general_common *common = &stats->general.common;
515 struct iwl_mvm_stat_data data = { 514 struct iwl_mvm_stat_data data = {
516 .stats = stats, 515 .stats = stats,
517 .mvm = mvm, 516 .mvm = mvm,
518 }; 517 };
519 518
520 iwl_mvm_tt_temp_changed(mvm, le32_to_cpu(common->temperature)); 519 /* Only handle rx statistics temperature changes if async temp
520 * notifications are not supported
521 */
522 if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM))
523 iwl_mvm_tt_temp_changed(mvm,
524 le32_to_cpu(stats->general.radio_temperature));
521 525
522 iwl_mvm_update_rx_statistics(mvm, stats); 526 iwl_mvm_update_rx_statistics(mvm, stats);
523 527
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 844bf7c4c8de..7e9aa3cb3254 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -99,7 +99,7 @@ static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
99{ 99{
100 if (mvm->scan_rx_ant != ANT_NONE) 100 if (mvm->scan_rx_ant != ANT_NONE)
101 return mvm->scan_rx_ant; 101 return mvm->scan_rx_ant;
102 return mvm->fw->valid_rx_ant; 102 return iwl_mvm_get_valid_rx_ant(mvm);
103} 103}
104 104
105static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) 105static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
@@ -130,7 +130,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
130 u32 tx_ant; 130 u32 tx_ant;
131 131
132 mvm->scan_last_antenna_idx = 132 mvm->scan_last_antenna_idx =
133 iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant, 133 iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
134 mvm->scan_last_antenna_idx); 134 mvm->scan_last_antenna_idx);
135 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS; 135 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
136 136
@@ -290,11 +290,11 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
290 struct ieee80211_vif *vif) 290 struct ieee80211_vif *vif)
291{ 291{
292 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 292 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
293 bool *global_bound = data; 293 int *global_cnt = data;
294 294
295 if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt && 295 if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
296 mvmvif->phy_ctxt->id < MAX_PHYS) 296 mvmvif->phy_ctxt->id < MAX_PHYS)
297 *global_bound = true; 297 *global_cnt += 1;
298} 298}
299 299
300static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, 300static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
@@ -302,27 +302,31 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
302 int n_ssids, u32 flags, 302 int n_ssids, u32 flags,
303 struct iwl_mvm_scan_params *params) 303 struct iwl_mvm_scan_params *params)
304{ 304{
305 bool global_bound = false; 305 int global_cnt = 0;
306 enum ieee80211_band band; 306 enum ieee80211_band band;
307 u8 frag_passive_dwell = 0; 307 u8 frag_passive_dwell = 0;
308 308
309 ieee80211_iterate_active_interfaces_atomic(mvm->hw, 309 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
310 IEEE80211_IFACE_ITER_NORMAL, 310 IEEE80211_IFACE_ITER_NORMAL,
311 iwl_mvm_scan_condition_iterator, 311 iwl_mvm_scan_condition_iterator,
312 &global_bound); 312 &global_cnt);
313 313
314 if (!global_bound) 314 if (!global_cnt)
315 goto not_bound; 315 goto not_bound;
316 316
317 params->suspend_time = 30; 317 params->suspend_time = 30;
318 params->max_out_time = 170; 318 params->max_out_time = 120;
319 319
320 if (iwl_mvm_low_latency(mvm)) { 320 if (iwl_mvm_low_latency(mvm)) {
321 if (mvm->fw->ucode_capa.api[0] & 321 if (mvm->fw->ucode_capa.api[0] &
322 IWL_UCODE_TLV_API_FRAGMENTED_SCAN) { 322 IWL_UCODE_TLV_API_FRAGMENTED_SCAN) {
323 params->suspend_time = 105; 323 params->suspend_time = 105;
324 params->max_out_time = 70; 324 /*
325 frag_passive_dwell = 20; 325 * If there is more than one active interface make
326 * passive scan more fragmented.
327 */
328 frag_passive_dwell = (global_cnt < 2) ? 40 : 20;
329 params->max_out_time = frag_passive_dwell;
326 } else { 330 } else {
327 params->suspend_time = 120; 331 params->suspend_time = 120;
328 params->max_out_time = 120; 332 params->max_out_time = 120;
@@ -539,6 +543,19 @@ int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
539 return 0; 543 return 0;
540} 544}
541 545
546int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
547 struct iwl_rx_cmd_buffer *rxb,
548 struct iwl_device_cmd *cmd)
549{
550 struct iwl_rx_packet *pkt = rxb_addr(rxb);
551 struct iwl_scan_complete_notif *notif = (void *)pkt->data;
552
553 IWL_DEBUG_SCAN(mvm,
554 "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
555 notif->status, notif->scanned_channels);
556 return 0;
557}
558
542int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, 559int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
543 struct iwl_device_cmd *cmd) 560 struct iwl_device_cmd *cmd)
544{ 561{
@@ -687,7 +704,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
687 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); 704 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
688 } 705 }
689 706
690 mvm->last_ebs_successful = !ebs_status; 707 if (ebs_status)
708 mvm->last_ebs_successful = false;
691 709
692 return 0; 710 return 0;
693} 711}
@@ -1480,6 +1498,11 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
1480 if (req->n_ssids == 0) 1498 if (req->n_ssids == 0)
1481 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE; 1499 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
1482 1500
1501#ifdef CONFIG_IWLWIFI_DEBUGFS
1502 if (mvm->scan_iter_notif_enabled)
1503 flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
1504#endif
1505
1483 cmd->scan_flags |= cpu_to_le32(flags); 1506 cmd->scan_flags |= cpu_to_le32(flags);
1484 1507
1485 cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band); 1508 cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
@@ -1641,7 +1664,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
1641 SCAN_CONFIG_FLAG_SET_MAC_ADDR | 1664 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
1642 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS| 1665 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
1643 SCAN_CONFIG_N_CHANNELS(num_channels)); 1666 SCAN_CONFIG_N_CHANNELS(num_channels));
1644 scan_config->tx_chains = cpu_to_le32(mvm->fw->valid_tx_ant); 1667 scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
1645 scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); 1668 scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
1646 scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm); 1669 scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
1647 scan_config->out_of_channel_time = cpu_to_le32(170); 1670 scan_config->out_of_channel_time = cpu_to_le32(170);
@@ -1660,10 +1683,10 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
1660 1683
1661 band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ]; 1684 band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
1662 for (i = 0; i < band->n_channels; i++, j++) 1685 for (i = 0; i < band->n_channels; i++, j++)
1663 scan_config->channel_array[j] = band->channels[i].center_freq; 1686 scan_config->channel_array[j] = band->channels[i].hw_value;
1664 band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; 1687 band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
1665 for (i = 0; i < band->n_channels; i++, j++) 1688 for (i = 0; i < band->n_channels; i++, j++)
1666 scan_config->channel_array[j] = band->channels[i].center_freq; 1689 scan_config->channel_array[j] = band->channels[i].hw_value;
1667 1690
1668 cmd.data[0] = scan_config; 1691 cmd.data[0] = scan_config;
1669 cmd.len[0] = cmd_size; 1692 cmd.len[0] = cmd_size;
@@ -1840,6 +1863,13 @@ int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1840 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL; 1863 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
1841 1864
1842 cmd->general_flags = cpu_to_le32(flags); 1865 cmd->general_flags = cpu_to_le32(flags);
1866
1867 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
1868 mvm->last_ebs_successful)
1869 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
1870 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1871 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
1872
1843 cmd->n_channels = req->req.n_channels; 1873 cmd->n_channels = req->req.n_channels;
1844 1874
1845 for (i = 0; i < req->req.n_ssids; i++) 1875 for (i = 0; i < req->req.n_ssids; i++)
@@ -2003,7 +2033,9 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
2003 notif->ebs_status == IWL_SCAN_EBS_SUCCESS ? 2033 notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
2004 "success" : "failed"); 2034 "success" : "failed");
2005 2035
2006 mvm->last_ebs_successful = !notif->ebs_status; 2036 if (notif->ebs_status)
2037 mvm->last_ebs_successful = false;
2038
2007 mvm->scan_uid[uid_idx] = 0; 2039 mvm->scan_uid[uid_idx] = 0;
2008 2040
2009 if (!sched) { 2041 if (!sched) {
@@ -2036,10 +2068,14 @@ static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
2036 2068
2037 /* 2069 /*
2038 * Clear scan uid of scans that was aborted from above and completed 2070 * Clear scan uid of scans that was aborted from above and completed
2039 * in FW so the RX handler does nothing. 2071 * in FW so the RX handler does nothing. Set last_ebs_successful here if
2072 * needed.
2040 */ 2073 */
2041 scan_done->mvm->scan_uid[uid_idx] = 0; 2074 scan_done->mvm->scan_uid[uid_idx] = 0;
2042 2075
2076 if (notif->ebs_status)
2077 scan_done->mvm->last_ebs_successful = false;
2078
2043 return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type); 2079 return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
2044} 2080}
2045 2081
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index d86fe432e51f..5c23cddaaae3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -99,7 +99,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
99int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 99int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
100 bool update) 100 bool update)
101{ 101{
102 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 102 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
103 struct iwl_mvm_add_sta_cmd add_sta_cmd = { 103 struct iwl_mvm_add_sta_cmd add_sta_cmd = {
104 .sta_id = mvm_sta->sta_id, 104 .sta_id = mvm_sta->sta_id,
105 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), 105 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
@@ -209,6 +209,9 @@ static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
209{ 209{
210 unsigned long used_hw_queues; 210 unsigned long used_hw_queues;
211 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 211 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
212 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
213 mvm->cfg->base_params->wd_timeout :
214 IWL_WATCHDOG_DISABLED;
212 u32 ac; 215 u32 ac;
213 216
214 lockdep_assert_held(&mvm->mutex); 217 lockdep_assert_held(&mvm->mutex);
@@ -232,7 +235,7 @@ static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
232 /* Found a place for all queues - enable them */ 235 /* Found a place for all queues - enable them */
233 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 236 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
234 iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac], 237 iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
235 iwl_mvm_ac_to_tx_fifo[ac]); 238 iwl_mvm_ac_to_tx_fifo[ac], wdg_timeout);
236 mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]); 239 mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
237 } 240 }
238 241
@@ -250,8 +253,8 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
250 253
251 /* disable the TDLS STA-specific queues */ 254 /* disable the TDLS STA-specific queues */
252 sta_msk = mvmsta->tfd_queue_msk; 255 sta_msk = mvmsta->tfd_queue_msk;
253 for_each_set_bit(i, &sta_msk, sizeof(sta_msk)) 256 for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
254 iwl_mvm_disable_txq(mvm, i); 257 iwl_mvm_disable_txq(mvm, i, 0);
255} 258}
256 259
257int iwl_mvm_add_sta(struct iwl_mvm *mvm, 260int iwl_mvm_add_sta(struct iwl_mvm *mvm,
@@ -259,7 +262,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
259 struct ieee80211_sta *sta) 262 struct ieee80211_sta *sta)
260{ 263{
261 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 264 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
262 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 265 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
263 int i, ret, sta_id; 266 int i, ret, sta_id;
264 267
265 lockdep_assert_held(&mvm->mutex); 268 lockdep_assert_held(&mvm->mutex);
@@ -464,8 +467,8 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
464 if (mvm->tfd_drained[sta_id]) { 467 if (mvm->tfd_drained[sta_id]) {
465 unsigned long i, msk = mvm->tfd_drained[sta_id]; 468 unsigned long i, msk = mvm->tfd_drained[sta_id];
466 469
467 for_each_set_bit(i, &msk, sizeof(msk)) 470 for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
468 iwl_mvm_disable_txq(mvm, i); 471 iwl_mvm_disable_txq(mvm, i, 0);
469 472
470 mvm->tfd_drained[sta_id] = 0; 473 mvm->tfd_drained[sta_id] = 0;
471 IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n", 474 IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
@@ -481,7 +484,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
481 struct ieee80211_sta *sta) 484 struct ieee80211_sta *sta)
482{ 485{
483 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 486 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
484 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 487 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
485 int ret; 488 int ret;
486 489
487 lockdep_assert_held(&mvm->mutex); 490 lockdep_assert_held(&mvm->mutex);
@@ -626,13 +629,16 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
626 629
627int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) 630int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
628{ 631{
632 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
633 mvm->cfg->base_params->wd_timeout :
634 IWL_WATCHDOG_DISABLED;
629 int ret; 635 int ret;
630 636
631 lockdep_assert_held(&mvm->mutex); 637 lockdep_assert_held(&mvm->mutex);
632 638
633 /* Map Aux queue to fifo - needs to happen before adding Aux station */ 639 /* Map Aux queue to fifo - needs to happen before adding Aux station */
634 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, 640 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue,
635 IWL_MVM_TX_FIFO_MCAST); 641 IWL_MVM_TX_FIFO_MCAST, wdg_timeout);
636 642
637 /* Allocate aux station and assign to it the aux queue */ 643 /* Allocate aux station and assign to it the aux queue */
638 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), 644 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
@@ -774,7 +780,7 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
774int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 780int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
775 int tid, u16 ssn, bool start) 781 int tid, u16 ssn, bool start)
776{ 782{
777 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 783 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
778 struct iwl_mvm_add_sta_cmd cmd = {}; 784 struct iwl_mvm_add_sta_cmd cmd = {};
779 int ret; 785 int ret;
780 u32 status; 786 u32 status;
@@ -834,7 +840,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
834static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 840static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
835 int tid, u8 queue, bool start) 841 int tid, u8 queue, bool start)
836{ 842{
837 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 843 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
838 struct iwl_mvm_add_sta_cmd cmd = {}; 844 struct iwl_mvm_add_sta_cmd cmd = {};
839 int ret; 845 int ret;
840 u32 status; 846 u32 status;
@@ -965,6 +971,9 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
965{ 971{
966 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 972 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
967 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 973 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
974 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
975 mvm->cfg->base_params->wd_timeout :
976 IWL_WATCHDOG_DISABLED;
968 int queue, fifo, ret; 977 int queue, fifo, ret;
969 u16 ssn; 978 u16 ssn;
970 979
@@ -988,7 +997,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
988 return -EIO; 997 return -EIO;
989 998
990 iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid, 999 iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
991 buf_size, ssn); 1000 buf_size, ssn, wdg_timeout);
992 1001
993 /* 1002 /*
994 * Even though in theory the peer could have different 1003 * Even though in theory the peer could have different
@@ -1058,7 +1067,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1058 1067
1059 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); 1068 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
1060 1069
1061 iwl_mvm_disable_txq(mvm, txq_id); 1070 iwl_mvm_disable_txq(mvm, txq_id, 0);
1062 return 0; 1071 return 0;
1063 case IWL_AGG_STARTING: 1072 case IWL_AGG_STARTING:
1064 case IWL_EMPTYING_HW_QUEUE_ADDBA: 1073 case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1116,7 +1125,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1116 1125
1117 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); 1126 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
1118 1127
1119 iwl_mvm_disable_txq(mvm, tid_data->txq_id); 1128 iwl_mvm_disable_txq(mvm, tid_data->txq_id, 0);
1120 } 1129 }
1121 1130
1122 mvm->queue_to_mac80211[tid_data->txq_id] = 1131 mvm->queue_to_mac80211[tid_data->txq_id] =
@@ -1144,10 +1153,10 @@ static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
1144static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, 1153static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
1145 struct ieee80211_sta *sta) 1154 struct ieee80211_sta *sta)
1146{ 1155{
1147 struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv; 1156 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1148 1157
1149 if (sta) { 1158 if (sta) {
1150 struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; 1159 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1151 1160
1152 return mvm_sta->sta_id; 1161 return mvm_sta->sta_id;
1153 } 1162 }
@@ -1196,6 +1205,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
1196 break; 1205 break;
1197 case WLAN_CIPHER_SUITE_WEP104: 1206 case WLAN_CIPHER_SUITE_WEP104:
1198 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); 1207 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
1208 /* fall through */
1199 case WLAN_CIPHER_SUITE_WEP40: 1209 case WLAN_CIPHER_SUITE_WEP40:
1200 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); 1210 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
1201 memcpy(cmd.key + 3, keyconf->key, keyconf->keylen); 1211 memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
@@ -1280,7 +1290,7 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
1280 struct ieee80211_vif *vif, 1290 struct ieee80211_vif *vif,
1281 struct ieee80211_sta *sta) 1291 struct ieee80211_sta *sta)
1282{ 1292{
1283 struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv; 1293 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1284 1294
1285 if (sta) 1295 if (sta)
1286 return sta->addr; 1296 return sta->addr;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tdls.c b/drivers/net/wireless/iwlwifi/mvm/tdls.c
index c0e00bae5bd0..a87b506c8c72 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tdls.c
@@ -64,6 +64,8 @@
64#include <linux/etherdevice.h> 64#include <linux/etherdevice.h>
65#include "mvm.h" 65#include "mvm.h"
66#include "time-event.h" 66#include "time-event.h"
67#include "iwl-io.h"
68#include "iwl-prph.h"
67 69
68#define TU_TO_US(x) (x * 1024) 70#define TU_TO_US(x) (x * 1024)
69#define TU_TO_MS(x) (TU_TO_US(x) / 1000) 71#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
@@ -228,6 +230,8 @@ iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
228 return "IDLE"; 230 return "IDLE";
229 case IWL_MVM_TDLS_SW_REQ_SENT: 231 case IWL_MVM_TDLS_SW_REQ_SENT:
230 return "REQ SENT"; 232 return "REQ SENT";
233 case IWL_MVM_TDLS_SW_RESP_RCVD:
234 return "RESP RECEIVED";
231 case IWL_MVM_TDLS_SW_REQ_RCVD: 235 case IWL_MVM_TDLS_SW_REQ_RCVD:
232 return "REQ RECEIVED"; 236 return "REQ RECEIVED";
233 case IWL_MVM_TDLS_SW_ACTIVE: 237 case IWL_MVM_TDLS_SW_ACTIVE:
@@ -248,6 +252,11 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
248 iwl_mvm_tdls_cs_state_str(state)); 252 iwl_mvm_tdls_cs_state_str(state));
249 mvm->tdls_cs.state = state; 253 mvm->tdls_cs.state = state;
250 254
255 /* we only send requests to our switching peer - update sent time */
256 if (state == IWL_MVM_TDLS_SW_REQ_SENT)
257 mvm->tdls_cs.peer.sent_timestamp =
258 iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
259
251 if (state == IWL_MVM_TDLS_SW_IDLE) 260 if (state == IWL_MVM_TDLS_SW_IDLE)
252 mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT; 261 mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
253} 262}
@@ -300,7 +309,7 @@ out:
300static int 309static int
301iwl_mvm_tdls_check_action(struct iwl_mvm *mvm, 310iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
302 enum iwl_tdls_channel_switch_type type, 311 enum iwl_tdls_channel_switch_type type,
303 const u8 *peer, bool peer_initiator) 312 const u8 *peer, bool peer_initiator, u32 timestamp)
304{ 313{
305 bool same_peer = false; 314 bool same_peer = false;
306 int ret = 0; 315 int ret = 0;
@@ -325,17 +334,30 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
325 ret = -EINVAL; 334 ret = -EINVAL;
326 break; 335 break;
327 case IWL_MVM_TDLS_SW_REQ_SENT: 336 case IWL_MVM_TDLS_SW_REQ_SENT:
337 /* only allow requests from the same peer */
338 if (!same_peer)
339 ret = -EBUSY;
340 else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
341 !peer_initiator)
342 /*
343 * We received a ch-switch request while an outgoing
344 * one is pending. Allow it if the peer is the link
345 * initiator.
346 */
347 ret = -EBUSY;
348 else if (type == TDLS_SEND_CHAN_SW_REQ)
349 /* wait for idle before sending another request */
350 ret = -EBUSY;
351 else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
352 /* we got a stale response - ignore it */
353 ret = -EINVAL;
354 break;
355 case IWL_MVM_TDLS_SW_RESP_RCVD:
328 /* 356 /*
329 * We received a ch-switch request while an outgoing one is 357 * we are waiting for the FW to give an "active" notification,
330 * pending. Allow it to proceed if the other peer is the same 358 * so ignore requests in the meantime
331 * one we sent to, and we are not the link initiator.
332 */ 359 */
333 if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH) { 360 ret = -EBUSY;
334 if (!same_peer)
335 ret = -EBUSY;
336 else if (!peer_initiator) /* we are the initiator */
337 ret = -EBUSY;
338 }
339 break; 361 break;
340 case IWL_MVM_TDLS_SW_REQ_RCVD: 362 case IWL_MVM_TDLS_SW_REQ_RCVD:
341 /* as above, allow the link initiator to proceed */ 363 /* as above, allow the link initiator to proceed */
@@ -349,9 +371,12 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
349 } 371 }
350 break; 372 break;
351 case IWL_MVM_TDLS_SW_ACTIVE: 373 case IWL_MVM_TDLS_SW_ACTIVE:
352 /* we don't allow initiations during active channel switch */ 374 /*
353 if (type == TDLS_SEND_CHAN_SW_REQ) 375 * the only valid request when active is a request to return
354 ret = -EINVAL; 376 * to the base channel by the current off-channel peer
377 */
378 if (type != TDLS_MOVE_CH || !same_peer)
379 ret = -EBUSY;
355 break; 380 break;
356 } 381 }
357 382
@@ -384,7 +409,8 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
384 409
385 lockdep_assert_held(&mvm->mutex); 410 lockdep_assert_held(&mvm->mutex);
386 411
387 ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator); 412 ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
413 timestamp);
388 if (ret) 414 if (ret)
389 return ret; 415 return ret;
390 416
@@ -473,6 +499,8 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
473 type == TDLS_SEND_CHAN_SW_REQ ? 499 type == TDLS_SEND_CHAN_SW_REQ ?
474 IWL_MVM_TDLS_SW_REQ_SENT : 500 IWL_MVM_TDLS_SW_REQ_SENT :
475 IWL_MVM_TDLS_SW_REQ_RCVD); 501 IWL_MVM_TDLS_SW_REQ_RCVD);
502 } else {
503 iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
476 } 504 }
477 505
478out: 506out:
@@ -657,12 +685,15 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
657 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 685 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
658 enum iwl_tdls_channel_switch_type type; 686 enum iwl_tdls_channel_switch_type type;
659 unsigned int delay; 687 unsigned int delay;
688 const char *action_str =
689 params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
690 "REQ" : "RESP";
660 691
661 mutex_lock(&mvm->mutex); 692 mutex_lock(&mvm->mutex);
662 693
663 IWL_DEBUG_TDLS(mvm, 694 IWL_DEBUG_TDLS(mvm,
664 "Received TDLS ch switch action %d from %pM status %d\n", 695 "Received TDLS ch switch action %s from %pM status %d\n",
665 params->action_code, params->sta->addr, params->status); 696 action_str, params->sta->addr, params->status);
666 697
667 /* 698 /*
668 * we got a non-zero status from a peer we were switching to - move to 699 * we got a non-zero status from a peer we were switching to - move to
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 2b1e61fac34a..ba615ad2176c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -69,6 +69,7 @@
69 69
70static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) 70static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
71{ 71{
72 struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
72 u32 duration = mvm->thermal_throttle.params->ct_kill_duration; 73 u32 duration = mvm->thermal_throttle.params->ct_kill_duration;
73 74
74 if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) 75 if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
@@ -77,12 +78,15 @@ static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
77 IWL_ERR(mvm, "Enter CT Kill\n"); 78 IWL_ERR(mvm, "Enter CT Kill\n");
78 iwl_mvm_set_hw_ctkill_state(mvm, true); 79 iwl_mvm_set_hw_ctkill_state(mvm, true);
79 80
81 tt->throttle = false;
82 tt->dynamic_smps = false;
83
80 /* Don't schedule an exit work if we're in test mode, since 84 /* Don't schedule an exit work if we're in test mode, since
81 * the temperature will not change unless we manually set it 85 * the temperature will not change unless we manually set it
82 * again (or disable testing). 86 * again (or disable testing).
83 */ 87 */
84 if (!mvm->temperature_test) 88 if (!mvm->temperature_test)
85 schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, 89 schedule_delayed_work(&tt->ct_kill_exit,
86 round_jiffies_relative(duration * HZ)); 90 round_jiffies_relative(duration * HZ));
87} 91}
88 92
@@ -452,6 +456,7 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
452 tt->params = &iwl7000_tt_params; 456 tt->params = &iwl7000_tt_params;
453 457
454 tt->throttle = false; 458 tt->throttle = false;
459 tt->dynamic_smps = false;
455 tt->min_backoff = min_backoff; 460 tt->min_backoff = min_backoff;
456 INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill); 461 INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
457} 462}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index c59d07567d90..07304e1fd64a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -220,7 +220,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
220 rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); 220 rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
221 221
222 mvm->mgmt_last_antenna_idx = 222 mvm->mgmt_last_antenna_idx =
223 iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant, 223 iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
224 mvm->mgmt_last_antenna_idx); 224 mvm->mgmt_last_antenna_idx);
225 225
226 if (info->band == IEEE80211_BAND_2GHZ && 226 if (info->band == IEEE80211_BAND_2GHZ &&
@@ -507,7 +507,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
507 IWL_DEBUG_TX_QUEUES(mvm, 507 IWL_DEBUG_TX_QUEUES(mvm,
508 "Can continue DELBA flow ssn = next_recl = %d\n", 508 "Can continue DELBA flow ssn = next_recl = %d\n",
509 tid_data->next_reclaimed); 509 tid_data->next_reclaimed);
510 iwl_mvm_disable_txq(mvm, tid_data->txq_id); 510 iwl_mvm_disable_txq(mvm, tid_data->txq_id, CMD_ASYNC);
511 tid_data->state = IWL_AGG_OFF; 511 tid_data->state = IWL_AGG_OFF;
512 /* 512 /*
513 * we can't hold the mutex - but since we are after a sequence 513 * we can't hold the mutex - but since we are after a sequence
@@ -667,7 +667,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
667 667
668 /* Single frame failure in an AMPDU queue => send BAR */ 668 /* Single frame failure in an AMPDU queue => send BAR */
669 if (txq_id >= mvm->first_agg_queue && 669 if (txq_id >= mvm->first_agg_queue &&
670 !(info->flags & IEEE80211_TX_STAT_ACK)) 670 !(info->flags & IEEE80211_TX_STAT_ACK) &&
671 !(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
671 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 672 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
672 673
673 /* W/A FW bug: seq_ctl is wrong when the status isn't success */ 674 /* W/A FW bug: seq_ctl is wrong when the status isn't success */
@@ -930,6 +931,11 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
930 sta_id = ba_notif->sta_id; 931 sta_id = ba_notif->sta_id;
931 tid = ba_notif->tid; 932 tid = ba_notif->tid;
932 933
934 if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
935 tid >= IWL_MAX_TID_COUNT,
936 "sta_id %d tid %d", sta_id, tid))
937 return 0;
938
933 rcu_read_lock(); 939 rcu_read_lock();
934 940
935 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 941 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 917431e30f74..8decf9953229 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -432,7 +432,7 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
432 mvm->status, table.valid); 432 mvm->status, table.valid);
433 } 433 }
434 434
435 IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, 435 IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
436 desc_lookup(table.error_id)); 436 desc_lookup(table.error_id));
437 IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1); 437 IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
438 IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2); 438 IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
@@ -531,49 +531,50 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
531} 531}
532 532
533void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn, 533void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
534 const struct iwl_trans_txq_scd_cfg *cfg) 534 const struct iwl_trans_txq_scd_cfg *cfg,
535 unsigned int wdg_timeout)
535{ 536{
536 if (iwl_mvm_is_dqa_supported(mvm)) { 537 struct iwl_scd_txq_cfg_cmd cmd = {
537 struct iwl_scd_txq_cfg_cmd cmd = { 538 .scd_queue = queue,
538 .scd_queue = queue, 539 .enable = 1,
539 .enable = 1, 540 .window = cfg->frame_limit,
540 .window = cfg->frame_limit, 541 .sta_id = cfg->sta_id,
541 .sta_id = cfg->sta_id, 542 .ssn = cpu_to_le16(ssn),
542 .ssn = cpu_to_le16(ssn), 543 .tx_fifo = cfg->fifo,
543 .tx_fifo = cfg->fifo, 544 .aggregate = cfg->aggregate,
544 .aggregate = cfg->aggregate, 545 .tid = cfg->tid,
545 .flags = IWL_SCD_FLAGS_DQA_ENABLED, 546 };
546 .tid = cfg->tid, 547
547 .control = IWL_SCD_CONTROL_SET_SSN, 548 if (!iwl_mvm_is_scd_cfg_supported(mvm)) {
548 }; 549 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, cfg,
549 int ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, 550 wdg_timeout);
550 sizeof(cmd), &cmd); 551 return;
551 if (ret)
552 IWL_ERR(mvm,
553 "Failed to configure queue %d on FIFO %d\n",
554 queue, cfg->fifo);
555 } 552 }
556 553
557 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, 554 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
558 iwl_mvm_is_dqa_supported(mvm) ? NULL : cfg); 555 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
556 "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
559} 557}
560 558
561void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue) 559void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags)
562{ 560{
563 iwl_trans_txq_disable(mvm->trans, queue, 561 struct iwl_scd_txq_cfg_cmd cmd = {
564 !iwl_mvm_is_dqa_supported(mvm)); 562 .scd_queue = queue,
565 563 .enable = 0,
566 if (iwl_mvm_is_dqa_supported(mvm)) { 564 };
567 struct iwl_scd_txq_cfg_cmd cmd = { 565 int ret;
568 .scd_queue = queue, 566
569 .enable = 0, 567 if (!iwl_mvm_is_scd_cfg_supported(mvm)) {
570 }; 568 iwl_trans_txq_disable(mvm->trans, queue, true);
571 int ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, CMD_ASYNC, 569 return;
572 sizeof(cmd), &cmd);
573 if (ret)
574 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
575 queue, ret);
576 } 570 }
571
572 iwl_trans_txq_disable(mvm->trans, queue, false);
573 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
574 sizeof(cmd), &cmd);
575 if (ret)
576 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
577 queue, ret);
577} 578}
578 579
579/** 580/**
@@ -620,7 +621,7 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
620 lockdep_assert_held(&mvm->mutex); 621 lockdep_assert_held(&mvm->mutex);
621 622
622 /* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */ 623 /* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */
623 if (num_of_ant(mvm->fw->valid_rx_ant) == 1) 624 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
624 return; 625 return;
625 626
626 if (vif->type == NL80211_IFTYPE_AP) 627 if (vif->type == NL80211_IFTYPE_AP)
@@ -662,7 +663,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
662 663
663 lockdep_assert_held(&mvm->mutex); 664 lockdep_assert_held(&mvm->mutex);
664 665
665 if (num_of_ant(mvm->fw->valid_rx_ant) == 1) 666 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
666 return false; 667 return false;
667 668
668 if (mvm->cfg->rx_with_siso_diversity) 669 if (mvm->cfg->rx_with_siso_diversity)
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index d5aadb00dd9e..dbd6bcf52205 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -415,6 +415,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
415 {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, 415 {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
416 {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, 416 {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
417 {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, 417 {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
418 {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
419 {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
418#endif /* CONFIG_IWLMVM */ 420#endif /* CONFIG_IWLMVM */
419 421
420 {0} 422 {0}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 1aea6b66c594..cae0eb8835ce 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -216,6 +216,7 @@ struct iwl_pcie_txq_scratch_buf {
216 * @need_update: indicates need to update read/write index 216 * @need_update: indicates need to update read/write index
217 * @active: stores if queue is active 217 * @active: stores if queue is active
218 * @ampdu: true if this queue is an ampdu queue for an specific RA/TID 218 * @ampdu: true if this queue is an ampdu queue for an specific RA/TID
219 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
219 * 220 *
220 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame 221 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
221 * descriptors) and required locking structures. 222 * descriptors) and required locking structures.
@@ -232,6 +233,7 @@ struct iwl_txq {
232 bool need_update; 233 bool need_update;
233 u8 active; 234 u8 active;
234 bool ampdu; 235 bool ampdu;
236 unsigned long wd_timeout;
235}; 237};
236 238
237static inline dma_addr_t 239static inline dma_addr_t
@@ -259,7 +261,6 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
259 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) 261 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
260 * @scd_set_active: should the transport configure the SCD for HCMD queue 262 * @scd_set_active: should the transport configure the SCD for HCMD queue
261 * @rx_page_order: page order for receive buffer size 263 * @rx_page_order: page order for receive buffer size
262 * @wd_timeout: queue watchdog timeout (jiffies)
263 * @reg_lock: protect hw register access 264 * @reg_lock: protect hw register access
264 * @cmd_in_flight: true when we have a host command in flight 265 * @cmd_in_flight: true when we have a host command in flight
265 * @fw_mon_phys: physical address of the buffer for the firmware monitor 266 * @fw_mon_phys: physical address of the buffer for the firmware monitor
@@ -302,6 +303,7 @@ struct iwl_trans_pcie {
302 303
303 u8 cmd_queue; 304 u8 cmd_queue;
304 u8 cmd_fifo; 305 u8 cmd_fifo;
306 unsigned int cmd_q_wdg_timeout;
305 u8 n_no_reclaim_cmds; 307 u8 n_no_reclaim_cmds;
306 u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; 308 u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
307 309
@@ -312,12 +314,14 @@ struct iwl_trans_pcie {
312 314
313 const char *const *command_names; 315 const char *const *command_names;
314 316
315 /* queue watchdog */
316 unsigned long wd_timeout;
317
318 /*protect hw register */ 317 /*protect hw register */
319 spinlock_t reg_lock; 318 spinlock_t reg_lock;
320 bool cmd_in_flight; 319 bool cmd_in_flight;
320 bool ref_cmd_in_flight;
321
322 /* protect ref counter */
323 spinlock_t ref_lock;
324 u32 ref_count;
321 325
322 dma_addr_t fw_mon_phys; 326 dma_addr_t fw_mon_phys;
323 struct page *fw_mon_page; 327 struct page *fw_mon_page;
@@ -368,7 +372,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
368int iwl_pcie_tx_stop(struct iwl_trans *trans); 372int iwl_pcie_tx_stop(struct iwl_trans *trans);
369void iwl_pcie_tx_free(struct iwl_trans *trans); 373void iwl_pcie_tx_free(struct iwl_trans *trans);
370void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn, 374void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
371 const struct iwl_trans_txq_scd_cfg *cfg); 375 const struct iwl_trans_txq_scd_cfg *cfg,
376 unsigned int wdg_timeout);
372void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue, 377void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
373 bool configure_scd); 378 bool configure_scd);
374int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, 379int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -381,6 +386,9 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
381 struct sk_buff_head *skbs); 386 struct sk_buff_head *skbs);
382void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); 387void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
383 388
389void iwl_trans_pcie_ref(struct iwl_trans *trans);
390void iwl_trans_pcie_unref(struct iwl_trans *trans);
391
384static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) 392static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
385{ 393{
386 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 394 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 523fe0c88dcb..69935aa5a1b3 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -75,6 +75,7 @@
75#include "iwl-trans.h" 75#include "iwl-trans.h"
76#include "iwl-csr.h" 76#include "iwl-csr.h"
77#include "iwl-prph.h" 77#include "iwl-prph.h"
78#include "iwl-scd.h"
78#include "iwl-agn-hw.h" 79#include "iwl-agn-hw.h"
79#include "iwl-fw-error-dump.h" 80#include "iwl-fw-error-dump.h"
80#include "internal.h" 81#include "internal.h"
@@ -443,10 +444,25 @@ static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
443 return ret; 444 return ret;
444} 445}
445 446
446static void iwl_pcie_apm_stop(struct iwl_trans *trans) 447static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
447{ 448{
448 IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); 449 IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
449 450
451 if (op_mode_leave) {
452 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
453 iwl_pcie_apm_init(trans);
454
455 /* inform ME that we are leaving */
456 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
457 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
458 APMG_PCIDEV_STT_VAL_WAKE_ME);
459 else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
460 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
461 CSR_HW_IF_CONFIG_REG_PREPARE |
462 CSR_HW_IF_CONFIG_REG_ENABLE_PME);
463 mdelay(5);
464 }
465
450 clear_bit(STATUS_DEVICE_ENABLED, &trans->status); 466 clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
451 467
452 /* Stop device's DMA activity */ 468 /* Stop device's DMA activity */
@@ -707,6 +723,11 @@ static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans,
707 723
708 *first_ucode_section = last_read_idx; 724 *first_ucode_section = last_read_idx;
709 725
726 if (cpu == 1)
727 iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
728 else
729 iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
730
710 return 0; 731 return 0;
711} 732}
712 733
@@ -893,8 +914,8 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
893 if (ret) 914 if (ret)
894 return ret; 915 return ret;
895 916
896 /* Notify FW loading is done */ 917 if (trans->dbg_dest_tlv)
897 iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); 918 iwl_pcie_apply_destination(trans);
898 919
899 /* wait for image verification to complete */ 920 /* wait for image verification to complete */
900 ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0, 921 ret = iwl_poll_prph_bit(trans, LMPM_SECURE_BOOT_CPU1_STATUS_ADDR_B0,
@@ -916,6 +937,7 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
916static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, 937static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
917 const struct fw_img *fw, bool run_in_rfkill) 938 const struct fw_img *fw, bool run_in_rfkill)
918{ 939{
940 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
919 int ret; 941 int ret;
920 bool hw_rfkill; 942 bool hw_rfkill;
921 943
@@ -945,6 +967,9 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
945 return ret; 967 return ret;
946 } 968 }
947 969
970 /* init ref_count to 1 (should be cleared when ucode is loaded) */
971 trans_pcie->ref_count = 1;
972
948 /* make sure rfkill handshake bits are cleared */ 973 /* make sure rfkill handshake bits are cleared */
949 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 974 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
950 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, 975 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
@@ -960,7 +985,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
960 985
961 /* Load the given image to the HW */ 986 /* Load the given image to the HW */
962 if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) && 987 if ((trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) &&
963 (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP)) 988 (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP))
964 return iwl_pcie_load_given_ucode_8000b(trans, fw); 989 return iwl_pcie_load_given_ucode_8000b(trans, fw);
965 else 990 else
966 return iwl_pcie_load_given_ucode(trans, fw); 991 return iwl_pcie_load_given_ucode(trans, fw);
@@ -1010,7 +1035,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1010 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1035 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1011 1036
1012 /* Stop the device, and put it in low power state */ 1037 /* Stop the device, and put it in low power state */
1013 iwl_pcie_apm_stop(trans); 1038 iwl_pcie_apm_stop(trans, false);
1014 1039
1015 /* stop and reset the on-board processor */ 1040 /* stop and reset the on-board processor */
1016 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 1041 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
@@ -1192,7 +1217,7 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
1192 iwl_disable_interrupts(trans); 1217 iwl_disable_interrupts(trans);
1193 spin_unlock(&trans_pcie->irq_lock); 1218 spin_unlock(&trans_pcie->irq_lock);
1194 1219
1195 iwl_pcie_apm_stop(trans); 1220 iwl_pcie_apm_stop(trans, true);
1196 1221
1197 spin_lock(&trans_pcie->irq_lock); 1222 spin_lock(&trans_pcie->irq_lock);
1198 iwl_disable_interrupts(trans); 1223 iwl_disable_interrupts(trans);
@@ -1244,6 +1269,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1244 1269
1245 trans_pcie->cmd_queue = trans_cfg->cmd_queue; 1270 trans_pcie->cmd_queue = trans_cfg->cmd_queue;
1246 trans_pcie->cmd_fifo = trans_cfg->cmd_fifo; 1271 trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
1272 trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
1247 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) 1273 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
1248 trans_pcie->n_no_reclaim_cmds = 0; 1274 trans_pcie->n_no_reclaim_cmds = 0;
1249 else 1275 else
@@ -1258,9 +1284,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1258 else 1284 else
1259 trans_pcie->rx_page_order = get_order(4 * 1024); 1285 trans_pcie->rx_page_order = get_order(4 * 1024);
1260 1286
1261 trans_pcie->wd_timeout =
1262 msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
1263
1264 trans_pcie->command_names = trans_cfg->command_names; 1287 trans_pcie->command_names = trans_cfg->command_names;
1265 trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; 1288 trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
1266 trans_pcie->scd_set_active = trans_cfg->scd_set_active; 1289 trans_pcie->scd_set_active = trans_cfg->scd_set_active;
@@ -1540,6 +1563,38 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
1540 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1563 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
1541} 1564}
1542 1565
1566void iwl_trans_pcie_ref(struct iwl_trans *trans)
1567{
1568 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1569 unsigned long flags;
1570
1571 if (iwlwifi_mod_params.d0i3_disable)
1572 return;
1573
1574 spin_lock_irqsave(&trans_pcie->ref_lock, flags);
1575 IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
1576 trans_pcie->ref_count++;
1577 spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
1578}
1579
1580void iwl_trans_pcie_unref(struct iwl_trans *trans)
1581{
1582 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1583 unsigned long flags;
1584
1585 if (iwlwifi_mod_params.d0i3_disable)
1586 return;
1587
1588 spin_lock_irqsave(&trans_pcie->ref_lock, flags);
1589 IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
1590 if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
1591 spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
1592 return;
1593 }
1594 trans_pcie->ref_count--;
1595 spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
1596}
1597
1543static const char *get_csr_string(int cmd) 1598static const char *get_csr_string(int cmd)
1544{ 1599{
1545#define IWL_CMD(x) case x: return #x 1600#define IWL_CMD(x) case x: return #x
@@ -2264,6 +2319,9 @@ static const struct iwl_trans_ops trans_ops_pcie = {
2264 .release_nic_access = iwl_trans_pcie_release_nic_access, 2319 .release_nic_access = iwl_trans_pcie_release_nic_access,
2265 .set_bits_mask = iwl_trans_pcie_set_bits_mask, 2320 .set_bits_mask = iwl_trans_pcie_set_bits_mask,
2266 2321
2322 .ref = iwl_trans_pcie_ref,
2323 .unref = iwl_trans_pcie_unref,
2324
2267 .dump_data = iwl_trans_pcie_dump_data, 2325 .dump_data = iwl_trans_pcie_dump_data,
2268}; 2326};
2269 2327
@@ -2291,6 +2349,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2291 trans_pcie->trans = trans; 2349 trans_pcie->trans = trans;
2292 spin_lock_init(&trans_pcie->irq_lock); 2350 spin_lock_init(&trans_pcie->irq_lock);
2293 spin_lock_init(&trans_pcie->reg_lock); 2351 spin_lock_init(&trans_pcie->reg_lock);
2352 spin_lock_init(&trans_pcie->ref_lock);
2294 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 2353 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
2295 2354
2296 err = pci_enable_device(pdev); 2355 err = pci_enable_device(pdev);
@@ -2404,6 +2463,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2404 } 2463 }
2405 2464
2406 trans_pcie->inta_mask = CSR_INI_SET_MASK; 2465 trans_pcie->inta_mask = CSR_INI_SET_MASK;
2466 trans->d0i3_mode = IWL_D0I3_MODE_ON_SUSPEND;
2407 2467
2408 return trans; 2468 return trans;
2409 2469
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8a6c7a084aa1..af0bce736358 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -147,7 +147,6 @@ static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
147static void iwl_pcie_txq_stuck_timer(unsigned long data) 147static void iwl_pcie_txq_stuck_timer(unsigned long data)
148{ 148{
149 struct iwl_txq *txq = (void *)data; 149 struct iwl_txq *txq = (void *)data;
150 struct iwl_queue *q = &txq->q;
151 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; 150 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
152 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); 151 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
153 u32 scd_sram_addr = trans_pcie->scd_base_addr + 152 u32 scd_sram_addr = trans_pcie->scd_base_addr +
@@ -164,7 +163,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
164 spin_unlock(&txq->lock); 163 spin_unlock(&txq->lock);
165 164
166 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, 165 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
167 jiffies_to_msecs(trans_pcie->wd_timeout)); 166 jiffies_to_msecs(txq->wd_timeout));
168 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", 167 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
169 txq->q.read_ptr, txq->q.write_ptr); 168 txq->q.read_ptr, txq->q.write_ptr);
170 169
@@ -198,11 +197,6 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
198 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); 197 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
199 } 198 }
200 199
201 for (i = q->read_ptr; i != q->write_ptr;
202 i = iwl_queue_inc_wrap(i))
203 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
204 le32_to_cpu(txq->scratchbufs[i].scratch));
205
206 iwl_force_nmi(trans); 200 iwl_force_nmi(trans);
207} 201}
208 202
@@ -680,7 +674,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
680 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); 674 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
681 675
682 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, 676 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
683 trans_pcie->cmd_fifo); 677 trans_pcie->cmd_fifo,
678 trans_pcie->cmd_q_wdg_timeout);
684 679
685 /* Activate all Tx DMA/FIFO channels */ 680 /* Activate all Tx DMA/FIFO channels */
686 iwl_scd_activate_fifos(trans); 681 iwl_scd_activate_fifos(trans);
@@ -722,7 +717,12 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
722 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 717 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
723 trans_pcie->kw.dma >> 4); 718 trans_pcie->kw.dma >> 4);
724 719
725 iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr); 720 /*
721 * Send 0 as the scd_base_addr since the device may have be reset
722 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
723 * contain garbage.
724 */
725 iwl_pcie_tx_start(trans, 0);
726} 726}
727 727
728/* 728/*
@@ -898,6 +898,10 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
898 } 898 }
899 } 899 }
900 900
901 if (trans->cfg->base_params->num_of_queues > 20)
902 iwl_set_bits_prph(trans, SCD_GP_CTRL,
903 SCD_GP_CTRL_ENABLE_31_QUEUES);
904
901 return 0; 905 return 0;
902error: 906error:
903 /*Upon error, free only if we allocated something */ 907 /*Upon error, free only if we allocated something */
@@ -906,10 +910,9 @@ error:
906 return ret; 910 return ret;
907} 911}
908 912
909static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie, 913static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
910 struct iwl_txq *txq)
911{ 914{
912 if (!trans_pcie->wd_timeout) 915 if (!txq->wd_timeout)
913 return; 916 return;
914 917
915 /* 918 /*
@@ -919,7 +922,7 @@ static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
919 if (txq->q.read_ptr == txq->q.write_ptr) 922 if (txq->q.read_ptr == txq->q.write_ptr)
920 del_timer(&txq->stuck_timer); 923 del_timer(&txq->stuck_timer);
921 else 924 else
922 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); 925 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
923} 926}
924 927
925/* Frees buffers until index _not_ inclusive */ 928/* Frees buffers until index _not_ inclusive */
@@ -981,21 +984,35 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
981 iwl_pcie_txq_free_tfd(trans, txq); 984 iwl_pcie_txq_free_tfd(trans, txq);
982 } 985 }
983 986
984 iwl_pcie_txq_progress(trans_pcie, txq); 987 iwl_pcie_txq_progress(txq);
985 988
986 if (iwl_queue_space(&txq->q) > txq->q.low_mark) 989 if (iwl_queue_space(&txq->q) > txq->q.low_mark)
987 iwl_wake_queue(trans, txq); 990 iwl_wake_queue(trans, txq);
991
992 if (q->read_ptr == q->write_ptr) {
993 IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
994 iwl_trans_pcie_unref(trans);
995 }
996
988out: 997out:
989 spin_unlock_bh(&txq->lock); 998 spin_unlock_bh(&txq->lock);
990} 999}
991 1000
992static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans) 1001static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
1002 const struct iwl_host_cmd *cmd)
993{ 1003{
994 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1004 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
995 int ret; 1005 int ret;
996 1006
997 lockdep_assert_held(&trans_pcie->reg_lock); 1007 lockdep_assert_held(&trans_pcie->reg_lock);
998 1008
1009 if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
1010 !trans_pcie->ref_cmd_in_flight) {
1011 trans_pcie->ref_cmd_in_flight = true;
1012 IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
1013 iwl_trans_pcie_ref(trans);
1014 }
1015
999 if (trans_pcie->cmd_in_flight) 1016 if (trans_pcie->cmd_in_flight)
1000 return 0; 1017 return 0;
1001 1018
@@ -1036,6 +1053,12 @@ static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
1036 1053
1037 lockdep_assert_held(&trans_pcie->reg_lock); 1054 lockdep_assert_held(&trans_pcie->reg_lock);
1038 1055
1056 if (trans_pcie->ref_cmd_in_flight) {
1057 trans_pcie->ref_cmd_in_flight = false;
1058 IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
1059 iwl_trans_pcie_unref(trans);
1060 }
1061
1039 if (WARN_ON(!trans_pcie->cmd_in_flight)) 1062 if (WARN_ON(!trans_pcie->cmd_in_flight))
1040 return 0; 1063 return 0;
1041 1064
@@ -1089,7 +1112,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
1089 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1112 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
1090 } 1113 }
1091 1114
1092 iwl_pcie_txq_progress(trans_pcie, txq); 1115 iwl_pcie_txq_progress(txq);
1093} 1116}
1094 1117
1095static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, 1118static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
@@ -1122,14 +1145,18 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
1122#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) 1145#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
1123 1146
1124void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, 1147void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
1125 const struct iwl_trans_txq_scd_cfg *cfg) 1148 const struct iwl_trans_txq_scd_cfg *cfg,
1149 unsigned int wdg_timeout)
1126{ 1150{
1127 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1151 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1152 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
1128 int fifo = -1; 1153 int fifo = -1;
1129 1154
1130 if (test_and_set_bit(txq_id, trans_pcie->queue_used)) 1155 if (test_and_set_bit(txq_id, trans_pcie->queue_used))
1131 WARN_ONCE(1, "queue %d already used - expect issues", txq_id); 1156 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
1132 1157
1158 txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
1159
1133 if (cfg) { 1160 if (cfg) {
1134 fifo = cfg->fifo; 1161 fifo = cfg->fifo;
1135 1162
@@ -1153,7 +1180,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
1153 1180
1154 /* enable aggregations for the queue */ 1181 /* enable aggregations for the queue */
1155 iwl_scd_txq_enable_agg(trans, txq_id); 1182 iwl_scd_txq_enable_agg(trans, txq_id);
1156 trans_pcie->txq[txq_id].ampdu = true; 1183 txq->ampdu = true;
1157 } else { 1184 } else {
1158 /* 1185 /*
1159 * disable aggregations for the queue, this will also 1186 * disable aggregations for the queue, this will also
@@ -1162,20 +1189,20 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
1162 */ 1189 */
1163 iwl_scd_txq_disable_agg(trans, txq_id); 1190 iwl_scd_txq_disable_agg(trans, txq_id);
1164 1191
1165 ssn = trans_pcie->txq[txq_id].q.read_ptr; 1192 ssn = txq->q.read_ptr;
1166 } 1193 }
1167 } 1194 }
1168 1195
1169 /* Place first TFD at index corresponding to start sequence number. 1196 /* Place first TFD at index corresponding to start sequence number.
1170 * Assumes that ssn_idx is valid (!= 0xFFF) */ 1197 * Assumes that ssn_idx is valid (!= 0xFFF) */
1171 trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); 1198 txq->q.read_ptr = (ssn & 0xff);
1172 trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); 1199 txq->q.write_ptr = (ssn & 0xff);
1200 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
1201 (ssn & 0xff) | (txq_id << 8));
1173 1202
1174 if (cfg) { 1203 if (cfg) {
1175 u8 frame_limit = cfg->frame_limit; 1204 u8 frame_limit = cfg->frame_limit;
1176 1205
1177 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
1178 (ssn & 0xff) | (txq_id << 8));
1179 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); 1206 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
1180 1207
1181 /* Set up Tx window size and frame limit for this queue */ 1208 /* Set up Tx window size and frame limit for this queue */
@@ -1200,11 +1227,17 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
1200 if (txq_id == trans_pcie->cmd_queue && 1227 if (txq_id == trans_pcie->cmd_queue &&
1201 trans_pcie->scd_set_active) 1228 trans_pcie->scd_set_active)
1202 iwl_scd_enable_set_active(trans, BIT(txq_id)); 1229 iwl_scd_enable_set_active(trans, BIT(txq_id));
1230
1231 IWL_DEBUG_TX_QUEUES(trans,
1232 "Activate queue %d on FIFO %d WrPtr: %d\n",
1233 txq_id, fifo, ssn & 0xff);
1234 } else {
1235 IWL_DEBUG_TX_QUEUES(trans,
1236 "Activate queue %d WrPtr: %d\n",
1237 txq_id, ssn & 0xff);
1203 } 1238 }
1204 1239
1205 trans_pcie->txq[txq_id].active = true; 1240 txq->active = true;
1206 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
1207 txq_id, fifo, ssn & 0xff);
1208} 1241}
1209 1242
1210void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, 1243void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
@@ -1469,11 +1502,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1469 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr); 1502 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
1470 1503
1471 /* start timer if queue currently empty */ 1504 /* start timer if queue currently empty */
1472 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) 1505 if (q->read_ptr == q->write_ptr && txq->wd_timeout)
1473 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); 1506 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1474 1507
1475 spin_lock_irqsave(&trans_pcie->reg_lock, flags); 1508 spin_lock_irqsave(&trans_pcie->reg_lock, flags);
1476 ret = iwl_pcie_set_cmd_in_flight(trans); 1509 ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
1477 if (ret < 0) { 1510 if (ret < 0) {
1478 idx = ret; 1511 idx = ret;
1479 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1512 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1819,9 +1852,12 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1819 wait_write_ptr = ieee80211_has_morefrags(fc); 1852 wait_write_ptr = ieee80211_has_morefrags(fc);
1820 1853
1821 /* start timer if queue currently empty */ 1854 /* start timer if queue currently empty */
1822 if (txq->need_update && q->read_ptr == q->write_ptr && 1855 if (q->read_ptr == q->write_ptr) {
1823 trans_pcie->wd_timeout) 1856 if (txq->wd_timeout)
1824 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); 1857 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1858 IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
1859 iwl_trans_pcie_ref(trans);
1860 }
1825 1861
1826 /* Tell device the write index *just past* this latest filled TFD */ 1862 /* Tell device the write index *just past* this latest filled TFD */
1827 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr); 1863 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 34f09ef90bb3..a92985a6ea21 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1616,10 +1616,10 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
1616 1616
1617 lbs_deb_enter(LBS_DEB_CFG80211); 1617 lbs_deb_enter(LBS_DEB_CFG80211);
1618 1618
1619 sinfo->filled |= STATION_INFO_TX_BYTES | 1619 sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES) |
1620 STATION_INFO_TX_PACKETS | 1620 BIT(NL80211_STA_INFO_TX_PACKETS) |
1621 STATION_INFO_RX_BYTES | 1621 BIT(NL80211_STA_INFO_RX_BYTES) |
1622 STATION_INFO_RX_PACKETS; 1622 BIT(NL80211_STA_INFO_RX_PACKETS);
1623 sinfo->tx_bytes = priv->dev->stats.tx_bytes; 1623 sinfo->tx_bytes = priv->dev->stats.tx_bytes;
1624 sinfo->tx_packets = priv->dev->stats.tx_packets; 1624 sinfo->tx_packets = priv->dev->stats.tx_packets;
1625 sinfo->rx_bytes = priv->dev->stats.rx_bytes; 1625 sinfo->rx_bytes = priv->dev->stats.rx_bytes;
@@ -1629,14 +1629,14 @@ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
1629 ret = lbs_get_rssi(priv, &signal, &noise); 1629 ret = lbs_get_rssi(priv, &signal, &noise);
1630 if (ret == 0) { 1630 if (ret == 0) {
1631 sinfo->signal = signal; 1631 sinfo->signal = signal;
1632 sinfo->filled |= STATION_INFO_SIGNAL; 1632 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
1633 } 1633 }
1634 1634
1635 /* Convert priv->cur_rate from hw_value to NL80211 value */ 1635 /* Convert priv->cur_rate from hw_value to NL80211 value */
1636 for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) { 1636 for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) {
1637 if (priv->cur_rate == lbs_rates[i].hw_value) { 1637 if (priv->cur_rate == lbs_rates[i].hw_value) {
1638 sinfo->txrate.legacy = lbs_rates[i].bitrate; 1638 sinfo->txrate.legacy = lbs_rates[i].bitrate;
1639 sinfo->filled |= STATION_INFO_TX_BITRATE; 1639 sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
1640 break; 1640 break;
1641 } 1641 }
1642 } 1642 }
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index ef58a8862d91..4a4c6586a8d2 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -625,22 +625,22 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
625 old_ps = data->ps; 625 old_ps = data->ps;
626 data->ps = val; 626 data->ps = val;
627 627
628 local_bh_disable();
628 if (val == PS_MANUAL_POLL) { 629 if (val == PS_MANUAL_POLL) {
629 ieee80211_iterate_active_interfaces(data->hw, 630 ieee80211_iterate_active_interfaces_atomic(
630 IEEE80211_IFACE_ITER_NORMAL, 631 data->hw, IEEE80211_IFACE_ITER_NORMAL,
631 hwsim_send_ps_poll, data); 632 hwsim_send_ps_poll, data);
632 data->ps_poll_pending = true; 633 data->ps_poll_pending = true;
633 } else if (old_ps == PS_DISABLED && val != PS_DISABLED) { 634 } else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
634 ieee80211_iterate_active_interfaces(data->hw, 635 ieee80211_iterate_active_interfaces_atomic(
635 IEEE80211_IFACE_ITER_NORMAL, 636 data->hw, IEEE80211_IFACE_ITER_NORMAL,
636 hwsim_send_nullfunc_ps, 637 hwsim_send_nullfunc_ps, data);
637 data);
638 } else if (old_ps != PS_DISABLED && val == PS_DISABLED) { 638 } else if (old_ps != PS_DISABLED && val == PS_DISABLED) {
639 ieee80211_iterate_active_interfaces(data->hw, 639 ieee80211_iterate_active_interfaces_atomic(
640 IEEE80211_IFACE_ITER_NORMAL, 640 data->hw, IEEE80211_IFACE_ITER_NORMAL,
641 hwsim_send_nullfunc_no_ps, 641 hwsim_send_nullfunc_no_ps, data);
642 data);
643 } 642 }
643 local_bh_enable();
644 644
645 return 0; 645 return 0;
646} 646}
@@ -2149,14 +2149,14 @@ static int append_radio_msg(struct sk_buff *skb, int id,
2149 if (param->regd) { 2149 if (param->regd) {
2150 int i; 2150 int i;
2151 2151
2152 for (i = 0; hwsim_world_regdom_custom[i] != param->regd && 2152 for (i = 0; i < ARRAY_SIZE(hwsim_world_regdom_custom); i++) {
2153 i < ARRAY_SIZE(hwsim_world_regdom_custom); i++) 2153 if (hwsim_world_regdom_custom[i] != param->regd)
2154 ; 2154 continue;
2155 2155
2156 if (i < ARRAY_SIZE(hwsim_world_regdom_custom)) {
2157 ret = nla_put_u32(skb, HWSIM_ATTR_REG_CUSTOM_REG, i); 2156 ret = nla_put_u32(skb, HWSIM_ATTR_REG_CUSTOM_REG, i);
2158 if (ret < 0) 2157 if (ret < 0)
2159 return ret; 2158 return ret;
2159 break;
2160 } 2160 }
2161 } 2161 }
2162 2162
@@ -2557,7 +2557,8 @@ static int mac80211_hwsim_get_radio(struct sk_buff *skb,
2557 if (res < 0) 2557 if (res < 0)
2558 goto out_err; 2558 goto out_err;
2559 2559
2560 return genlmsg_end(skb, hdr); 2560 genlmsg_end(skb, hdr);
2561 return 0;
2561 2562
2562out_err: 2563out_err:
2563 genlmsg_cancel(skb, hdr); 2564 genlmsg_cancel(skb, hdr);
diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/mwifiex/11h.c
index 2668e83afbb6..3ab87a855122 100644
--- a/drivers/net/wireless/mwifiex/11h.c
+++ b/drivers/net/wireless/mwifiex/11h.c
@@ -21,6 +21,16 @@
21#include "fw.h" 21#include "fw.h"
22 22
23 23
24void mwifiex_init_11h_params(struct mwifiex_private *priv)
25{
26 priv->state_11h.is_11h_enabled = true;
27 priv->state_11h.is_11h_active = false;
28}
29
30inline int mwifiex_is_11h_active(struct mwifiex_private *priv)
31{
32 return priv->state_11h.is_11h_active;
33}
24/* This function appends 11h info to a buffer while joining an 34/* This function appends 11h info to a buffer while joining an
25 * infrastructure BSS 35 * infrastructure BSS
26 */ 36 */
@@ -39,7 +49,7 @@ mwifiex_11h_process_infra_join(struct mwifiex_private *priv, u8 **buffer,
39 return; 49 return;
40 50
41 radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band); 51 radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
42 sband = priv->wdev->wiphy->bands[radio_type]; 52 sband = priv->wdev.wiphy->bands[radio_type];
43 53
44 cap = (struct mwifiex_ie_types_pwr_capability *)*buffer; 54 cap = (struct mwifiex_ie_types_pwr_capability *)*buffer;
45 cap->header.type = cpu_to_le16(WLAN_EID_PWR_CAPABILITY); 55 cap->header.type = cpu_to_le16(WLAN_EID_PWR_CAPABILITY);
@@ -69,10 +79,14 @@ mwifiex_11h_process_infra_join(struct mwifiex_private *priv, u8 **buffer,
69} 79}
70 80
71/* Enable or disable the 11h extensions in the firmware */ 81/* Enable or disable the 11h extensions in the firmware */
72static int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag) 82int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag)
73{ 83{
74 u32 enable = flag; 84 u32 enable = flag;
75 85
86 /* enable master mode radar detection on AP interface */
87 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) && enable)
88 enable |= MWIFIEX_MASTER_RADAR_DET_MASK;
89
76 return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB, 90 return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
77 HostCmd_ACT_GEN_SET, DOT11H_I, &enable, true); 91 HostCmd_ACT_GEN_SET, DOT11H_I, &enable, true);
78} 92}
@@ -91,11 +105,191 @@ void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
91 * bit 105 * bit
92 */ 106 */
93 mwifiex_11h_activate(priv, true); 107 mwifiex_11h_activate(priv, true);
108 priv->state_11h.is_11h_active = true;
94 bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_SPECTRUM_MGMT; 109 bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_SPECTRUM_MGMT;
95 mwifiex_11h_process_infra_join(priv, buffer, bss_desc); 110 mwifiex_11h_process_infra_join(priv, buffer, bss_desc);
96 } else { 111 } else {
97 /* Deactivate 11h functions in the firmware */ 112 /* Deactivate 11h functions in the firmware */
98 mwifiex_11h_activate(priv, false); 113 mwifiex_11h_activate(priv, false);
114 priv->state_11h.is_11h_active = false;
99 bss_desc->cap_info_bitmap &= ~WLAN_CAPABILITY_SPECTRUM_MGMT; 115 bss_desc->cap_info_bitmap &= ~WLAN_CAPABILITY_SPECTRUM_MGMT;
100 } 116 }
101} 117}
118
119/* This is DFS CAC work queue function.
120 * This delayed work emits CAC finished event for cfg80211 if
121 * CAC was started earlier.
122 */
123void mwifiex_dfs_cac_work_queue(struct work_struct *work)
124{
125 struct cfg80211_chan_def chandef;
126 struct delayed_work *delayed_work =
127 container_of(work, struct delayed_work, work);
128 struct mwifiex_private *priv =
129 container_of(delayed_work, struct mwifiex_private,
130 dfs_cac_work);
131
132 if (WARN_ON(!priv))
133 return;
134
135 chandef = priv->dfs_chandef;
136 if (priv->wdev.cac_started) {
137 dev_dbg(priv->adapter->dev,
138 "CAC timer finished; No radar detected\n");
139 cfg80211_cac_event(priv->netdev, &chandef,
140 NL80211_RADAR_CAC_FINISHED,
141 GFP_KERNEL);
142 }
143}
144
145/* This function prepares channel report request command to FW for
146 * starting radar detection.
147 */
148int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
149 struct host_cmd_ds_command *cmd,
150 void *data_buf)
151{
152 struct host_cmd_ds_chan_rpt_req *cr_req = &cmd->params.chan_rpt_req;
153 struct mwifiex_radar_params *radar_params = (void *)data_buf;
154
155 cmd->command = cpu_to_le16(HostCmd_CMD_CHAN_REPORT_REQUEST);
156 cmd->size = cpu_to_le16(S_DS_GEN);
157 le16_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_chan_rpt_req));
158
159 cr_req->chan_desc.start_freq = cpu_to_le16(MWIFIEX_A_BAND_START_FREQ);
160 cr_req->chan_desc.chan_num = radar_params->chandef->chan->hw_value;
161 cr_req->chan_desc.chan_width = radar_params->chandef->width;
162 cr_req->msec_dwell_time = cpu_to_le32(radar_params->cac_time_ms);
163
164 dev_dbg(priv->adapter->dev,
165 "11h: issuing DFS Radar check for channel=%d\n",
166 radar_params->chandef->chan->hw_value);
167
168 return 0;
169}
170
171/* This function is to abort ongoing CAC upon stopping AP operations
172 * or during unload.
173 */
174void mwifiex_abort_cac(struct mwifiex_private *priv)
175{
176 if (priv->wdev.cac_started) {
177 dev_dbg(priv->adapter->dev,
178 "Aborting delayed work for CAC.\n");
179 cancel_delayed_work_sync(&priv->dfs_cac_work);
180 cfg80211_cac_event(priv->netdev, &priv->dfs_chandef,
181 NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
182 }
183}
184
185/* This function handles channel report event from FW during CAC period.
186 * If radar is detected during CAC, driver indicates the same to cfg80211
187 * and also cancels ongoing delayed work.
188 */
189int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
190 struct sk_buff *skb)
191{
192 struct host_cmd_ds_chan_rpt_event *rpt_event;
193 struct mwifiex_ie_types_chan_rpt_data *rpt;
194 u8 *evt_buf;
195 u16 event_len, tlv_len;
196
197 rpt_event = (void *)(skb->data + sizeof(u32));
198 event_len = skb->len - (sizeof(struct host_cmd_ds_chan_rpt_event)+
199 sizeof(u32));
200
201 if (le32_to_cpu(rpt_event->result) != HostCmd_RESULT_OK) {
202 dev_err(priv->adapter->dev, "Error in channel report event\n");
203 return -1;
204 }
205
206 evt_buf = (void *)&rpt_event->tlvbuf;
207
208 while (event_len >= sizeof(struct mwifiex_ie_types_header)) {
209 rpt = (void *)&rpt_event->tlvbuf;
210 tlv_len = le16_to_cpu(rpt->header.len);
211
212 switch (le16_to_cpu(rpt->header.type)) {
213 case TLV_TYPE_CHANRPT_11H_BASIC:
214 if (rpt->map.radar) {
215 dev_notice(priv->adapter->dev,
216 "RADAR Detected on channel %d!\n",
217 priv->dfs_chandef.chan->hw_value);
218 cancel_delayed_work_sync(&priv->dfs_cac_work);
219 cfg80211_cac_event(priv->netdev,
220 &priv->dfs_chandef,
221 NL80211_RADAR_DETECTED,
222 GFP_KERNEL);
223 }
224 break;
225 default:
226 break;
227 }
228
229 evt_buf += (tlv_len + sizeof(rpt->header));
230 event_len -= (tlv_len + sizeof(rpt->header));
231 }
232
233 return 0;
234}
235
236/* Handler for radar detected event from FW.*/
237int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
238 struct sk_buff *skb)
239{
240 struct mwifiex_radar_det_event *rdr_event;
241
242 rdr_event = (void *)(skb->data + sizeof(u32));
243
244 if (le32_to_cpu(rdr_event->passed)) {
245 dev_notice(priv->adapter->dev,
246 "radar detected; indicating kernel\n");
247 cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef,
248 GFP_KERNEL);
249 dev_dbg(priv->adapter->dev, "regdomain: %d\n",
250 rdr_event->reg_domain);
251 dev_dbg(priv->adapter->dev, "radar detection type: %d\n",
252 rdr_event->det_type);
253 } else {
254 dev_dbg(priv->adapter->dev, "false radar detection event!\n");
255 }
256
257 return 0;
258}
259
260/* This is work queue function for channel switch handling.
261 * This function takes care of updating new channel definitin to
262 * bss config structure, restart AP and indicate channel switch success
263 * to cfg80211.
264 */
265void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
266{
267 struct mwifiex_uap_bss_param *bss_cfg;
268 struct delayed_work *delayed_work =
269 container_of(work, struct delayed_work, work);
270 struct mwifiex_private *priv =
271 container_of(delayed_work, struct mwifiex_private,
272 dfs_chan_sw_work);
273
274 if (WARN_ON(!priv))
275 return;
276
277 bss_cfg = &priv->bss_cfg;
278 if (!bss_cfg->beacon_period) {
279 dev_err(priv->adapter->dev,
280 "channel switch: AP already stopped\n");
281 return;
282 }
283
284 mwifiex_uap_set_channel(bss_cfg, priv->dfs_chandef);
285
286 if (mwifiex_config_start_uap(priv, bss_cfg)) {
287 dev_dbg(priv->adapter->dev,
288 "Failed to start AP after channel switch\n");
289 return;
290 }
291
292 dev_notice(priv->adapter->dev,
293 "indicating channel switch completion to kernel\n");
294 cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef);
295}
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 9d4786e7ddff..543148d27b01 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -39,7 +39,7 @@ int mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
39{ 39{
40 uint16_t ht_ext_cap = le16_to_cpu(ht_cap->extended_ht_cap_info); 40 uint16_t ht_ext_cap = le16_to_cpu(ht_cap->extended_ht_cap_info);
41 struct ieee80211_supported_band *sband = 41 struct ieee80211_supported_band *sband =
42 priv->wdev->wiphy->bands[radio_type]; 42 priv->wdev.wiphy->bands[radio_type];
43 43
44 if (WARN_ON_ONCE(!sband)) { 44 if (WARN_ON_ONCE(!sband)) {
45 dev_err(priv->adapter->dev, "Invalid radio type!\n"); 45 dev_err(priv->adapter->dev, "Invalid radio type!\n");
@@ -314,7 +314,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
314 return ret_len; 314 return ret_len;
315 315
316 radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band); 316 radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
317 sband = priv->wdev->wiphy->bands[radio_type]; 317 sband = priv->wdev.wiphy->bands[radio_type];
318 318
319 if (bss_desc->bcn_ht_cap) { 319 if (bss_desc->bcn_ht_cap) {
320 ht_cap = (struct mwifiex_ie_types_htcap *) *buffer; 320 ht_cap = (struct mwifiex_ie_types_htcap *) *buffer;
@@ -558,10 +558,10 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
558 spin_lock_irqsave(&priv->sta_list_spinlock, flags); 558 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
559 sta_ptr = mwifiex_get_sta_entry(priv, peer_mac); 559 sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
560 if (!sta_ptr) { 560 if (!sta_ptr) {
561 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
561 dev_warn(priv->adapter->dev, 562 dev_warn(priv->adapter->dev,
562 "BA setup with unknown TDLS peer %pM!\n", 563 "BA setup with unknown TDLS peer %pM!\n",
563 peer_mac); 564 peer_mac);
564 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
565 return -1; 565 return -1;
566 } 566 }
567 if (sta_ptr->is_11ac_enabled) 567 if (sta_ptr->is_11ac_enabled)
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index f275675cdbd3..8e2e39422ad8 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -130,7 +130,9 @@ static inline u8 mwifiex_space_avail_for_new_ba_stream(
130{ 130{
131 struct mwifiex_private *priv; 131 struct mwifiex_private *priv;
132 u8 i; 132 u8 i;
133 u32 ba_stream_num = 0; 133 u32 ba_stream_num = 0, ba_stream_max;
134
135 ba_stream_max = MWIFIEX_MAX_TX_BASTREAM_SUPPORTED;
134 136
135 for (i = 0; i < adapter->priv_num; i++) { 137 for (i = 0; i < adapter->priv_num; i++) {
136 priv = adapter->priv[i]; 138 priv = adapter->priv[i];
@@ -139,8 +141,14 @@ static inline u8 mwifiex_space_avail_for_new_ba_stream(
139 &priv->tx_ba_stream_tbl_ptr); 141 &priv->tx_ba_stream_tbl_ptr);
140 } 142 }
141 143
142 return ((ba_stream_num < 144 if (adapter->fw_api_ver == MWIFIEX_FW_V15) {
143 MWIFIEX_MAX_TX_BASTREAM_SUPPORTED) ? true : false); 145 ba_stream_max =
146 GETSUPP_TXBASTREAMS(adapter->hw_dot_11n_dev_cap);
147 if (!ba_stream_max)
148 ba_stream_max = MWIFIEX_MAX_TX_BASTREAM_SUPPORTED;
149 }
150
151 return ((ba_stream_num < ba_stream_max) ? true : false);
144} 152}
145 153
146/* 154/*
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 8720a3d3c755..9b983b5cebbd 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -101,6 +101,13 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
101{ 101{
102 struct txpd *local_tx_pd; 102 struct txpd *local_tx_pd;
103 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb); 103 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
104 unsigned int pad;
105 int headroom = (priv->adapter->iface_type ==
106 MWIFIEX_USB) ? 0 : INTF_HEADER_LEN;
107
108 pad = ((void *)skb->data - sizeof(*local_tx_pd) -
109 headroom - NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1);
110 skb_push(skb, pad);
104 111
105 skb_push(skb, sizeof(*local_tx_pd)); 112 skb_push(skb, sizeof(*local_tx_pd));
106 113
@@ -114,10 +121,12 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
114 local_tx_pd->bss_num = priv->bss_num; 121 local_tx_pd->bss_num = priv->bss_num;
115 local_tx_pd->bss_type = priv->bss_type; 122 local_tx_pd->bss_type = priv->bss_type;
116 /* Always zero as the data is followed by struct txpd */ 123 /* Always zero as the data is followed by struct txpd */
117 local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd)); 124 local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) +
125 pad);
118 local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU); 126 local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
119 local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len - 127 local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
120 sizeof(*local_tx_pd)); 128 sizeof(*local_tx_pd) -
129 pad);
121 130
122 if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT) 131 if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
123 local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET; 132 local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
@@ -182,7 +191,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
182 ra_list_flags); 191 ra_list_flags);
183 return -1; 192 return -1;
184 } 193 }
185 skb_reserve(skb_aggr, headroom + sizeof(struct txpd)); 194 skb_reserve(skb_aggr, MWIFIEX_MIN_DATA_HEADER_LEN);
186 tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr); 195 tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
187 196
188 memset(tx_info_aggr, 0, sizeof(*tx_info_aggr)); 197 memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index d73fda312c87..a2e8817b56d8 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -45,7 +45,7 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
45 skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length)); 45 skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
46 46
47 ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr, 47 ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
48 priv->wdev->iftype, 0, false); 48 priv->wdev.iftype, 0, false);
49 49
50 while (!skb_queue_empty(&list)) { 50 while (!skb_queue_empty(&list)) {
51 rx_skb = __skb_dequeue(&list); 51 rx_skb = __skb_dequeue(&list);
@@ -353,9 +353,6 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
353 353
354 spin_lock_irqsave(&priv->sta_list_spinlock, flags); 354 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
355 if (mwifiex_queuing_ra_based(priv)) { 355 if (mwifiex_queuing_ra_based(priv)) {
356 dev_dbg(priv->adapter->dev,
357 "info: AP/ADHOC:last_seq=%d start_win=%d\n",
358 last_seq, new_node->start_win);
359 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) { 356 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
360 node = mwifiex_get_sta_entry(priv, ta); 357 node = mwifiex_get_sta_entry(priv, ta);
361 if (node) 358 if (node)
@@ -370,6 +367,9 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
370 } 367 }
371 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); 368 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
372 369
370 dev_dbg(priv->adapter->dev, "info: last_seq=%d start_win=%d\n",
371 last_seq, new_node->start_win);
372
373 if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM && 373 if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
374 last_seq >= new_node->start_win) { 374 last_seq >= new_node->start_win) {
375 new_node->start_win = last_seq + 1; 375 new_node->start_win = last_seq + 1;
@@ -391,10 +391,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
391 new_node->timer_context.priv = priv; 391 new_node->timer_context.priv = priv;
392 new_node->timer_context.timer_is_set = false; 392 new_node->timer_context.timer_is_set = false;
393 393
394 init_timer(&new_node->timer_context.timer); 394 setup_timer(&new_node->timer_context.timer, mwifiex_flush_data,
395 new_node->timer_context.timer.function = mwifiex_flush_data; 395 (unsigned long)&new_node->timer_context);
396 new_node->timer_context.timer.data =
397 (unsigned long) &new_node->timer_context;
398 396
399 for (i = 0; i < win_size; ++i) 397 for (i = 0; i < win_size; ++i)
400 new_node->rx_reorder_ptr[i] = NULL; 398 new_node->rx_reorder_ptr[i] = NULL;
@@ -468,10 +466,10 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
468 sta_ptr = mwifiex_get_sta_entry(priv, 466 sta_ptr = mwifiex_get_sta_entry(priv,
469 cmd_addba_req->peer_mac_addr); 467 cmd_addba_req->peer_mac_addr);
470 if (!sta_ptr) { 468 if (!sta_ptr) {
469 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
471 dev_warn(priv->adapter->dev, 470 dev_warn(priv->adapter->dev,
472 "BA setup with unknown TDLS peer %pM!\n", 471 "BA setup with unknown TDLS peer %pM!\n",
473 cmd_addba_req->peer_mac_addr); 472 cmd_addba_req->peer_mac_addr);
474 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
475 return -1; 473 return -1;
476 } 474 }
477 if (sta_ptr->is_11ac_enabled) 475 if (sta_ptr->is_11ac_enabled)
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index 9487d728ac20..fdfd9bf15ed4 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -53,3 +53,5 @@ obj-$(CONFIG_MWIFIEX_PCIE) += mwifiex_pcie.o
53 53
54mwifiex_usb-y += usb.o 54mwifiex_usb-y += usb.o
55obj-$(CONFIG_MWIFIEX_USB) += mwifiex_usb.o 55obj-$(CONFIG_MWIFIEX_USB) += mwifiex_usb.o
56
57ccflags-y += -D__CHECK_ENDIAN
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 4a66a6555366..41c8e25df954 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -590,77 +590,62 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
590 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); 590 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
591 struct mwifiex_private *priv; 591 struct mwifiex_private *priv;
592 struct mwifiex_uap_bss_param *bss_cfg; 592 struct mwifiex_uap_bss_param *bss_cfg;
593 int ret, bss_started, i; 593 int ret;
594
595 for (i = 0; i < adapter->priv_num; i++) {
596 priv = adapter->priv[i];
597
598 switch (priv->bss_role) {
599 case MWIFIEX_BSS_ROLE_UAP:
600 bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param),
601 GFP_KERNEL);
602 if (!bss_cfg)
603 return -ENOMEM;
604
605 mwifiex_set_sys_config_invalid_data(bss_cfg);
606
607 if (changed & WIPHY_PARAM_RTS_THRESHOLD)
608 bss_cfg->rts_threshold = wiphy->rts_threshold;
609 if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
610 bss_cfg->frag_threshold = wiphy->frag_threshold;
611 if (changed & WIPHY_PARAM_RETRY_LONG)
612 bss_cfg->retry_limit = wiphy->retry_long;
613
614 bss_started = priv->bss_started;
615
616 ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
617 HostCmd_ACT_GEN_SET, 0,
618 NULL, true);
619 if (ret) {
620 wiphy_err(wiphy, "Failed to stop the BSS\n");
621 kfree(bss_cfg);
622 return ret;
623 }
624 594
625 ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG, 595 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
626 HostCmd_ACT_GEN_SET,
627 UAP_BSS_PARAMS_I, bss_cfg,
628 false);
629 596
630 kfree(bss_cfg); 597 switch (priv->bss_role) {
598 case MWIFIEX_BSS_ROLE_UAP:
599 if (priv->bss_started) {
600 dev_err(adapter->dev,
601 "cannot change wiphy params when bss started");
602 return -EINVAL;
603 }
631 604
632 if (ret) { 605 bss_cfg = kzalloc(sizeof(*bss_cfg), GFP_KERNEL);
633 wiphy_err(wiphy, "Failed to set bss config\n"); 606 if (!bss_cfg)
634 return ret; 607 return -ENOMEM;
635 }
636 608
637 if (!bss_started) 609 mwifiex_set_sys_config_invalid_data(bss_cfg);
638 break;
639 610
640 ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START, 611 if (changed & WIPHY_PARAM_RTS_THRESHOLD)
641 HostCmd_ACT_GEN_SET, 0, 612 bss_cfg->rts_threshold = wiphy->rts_threshold;
642 NULL, false); 613 if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
643 if (ret) { 614 bss_cfg->frag_threshold = wiphy->frag_threshold;
644 wiphy_err(wiphy, "Failed to start BSS\n"); 615 if (changed & WIPHY_PARAM_RETRY_LONG)
645 return ret; 616 bss_cfg->retry_limit = wiphy->retry_long;
646 } 617
618 ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
619 HostCmd_ACT_GEN_SET,
620 UAP_BSS_PARAMS_I, bss_cfg,
621 false);
622
623 kfree(bss_cfg);
624 if (ret) {
625 wiphy_err(wiphy, "Failed to set wiphy phy params\n");
626 return ret;
627 }
628 break;
647 629
648 break;
649 case MWIFIEX_BSS_ROLE_STA: 630 case MWIFIEX_BSS_ROLE_STA:
650 if (changed & WIPHY_PARAM_RTS_THRESHOLD) { 631 if (priv->media_connected) {
651 ret = mwifiex_set_rts(priv, 632 dev_err(adapter->dev,
652 wiphy->rts_threshold); 633 "cannot change wiphy params when connected");
653 if (ret) 634 return -EINVAL;
654 return ret;
655 }
656 if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
657 ret = mwifiex_set_frag(priv,
658 wiphy->frag_threshold);
659 if (ret)
660 return ret;
661 }
662 break;
663 } 635 }
636 if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
637 ret = mwifiex_set_rts(priv,
638 wiphy->rts_threshold);
639 if (ret)
640 return ret;
641 }
642 if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
643 ret = mwifiex_set_frag(priv,
644 wiphy->frag_threshold);
645 if (ret)
646 return ret;
647 }
648 break;
664 } 649 }
665 650
666 return 0; 651 return 0;
@@ -671,9 +656,6 @@ mwifiex_cfg80211_deinit_p2p(struct mwifiex_private *priv)
671{ 656{
672 u16 mode = P2P_MODE_DISABLE; 657 u16 mode = P2P_MODE_DISABLE;
673 658
674 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA)
675 mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_STA);
676
677 if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG, 659 if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
678 HostCmd_ACT_GEN_SET, 0, &mode, true)) 660 HostCmd_ACT_GEN_SET, 0, &mode, true))
679 return -1; 661 return -1;
@@ -730,12 +712,249 @@ mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
730 HostCmd_ACT_GEN_SET, 0, &mode, true)) 712 HostCmd_ACT_GEN_SET, 0, &mode, true))
731 return -1; 713 return -1;
732 714
733 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) 715 return 0;
734 mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_UAP); 716}
717
718static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
719{
720 priv->mgmt_frame_mask = 0;
721 if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
722 HostCmd_ACT_GEN_SET, 0,
723 &priv->mgmt_frame_mask, false)) {
724 dev_warn(priv->adapter->dev,
725 "could not unregister mgmt frame rx\n");
726 return -1;
727 }
728
729 mwifiex_deauthenticate(priv, NULL);
730 mwifiex_free_priv(priv);
731 priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
732 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
733 priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
734
735 return 0;
736}
737
738static int
739mwifiex_init_new_priv_params(struct mwifiex_private *priv,
740 struct net_device *dev,
741 enum nl80211_iftype type)
742{
743 mwifiex_init_priv(priv);
744
745 priv->bss_mode = type;
746 priv->wdev.iftype = type;
747
748 mwifiex_init_priv_params(priv, priv->netdev);
749 priv->bss_started = 0;
750
751 switch (type) {
752 case NL80211_IFTYPE_STATION:
753 case NL80211_IFTYPE_ADHOC:
754 priv->bss_role = MWIFIEX_BSS_ROLE_STA;
755 priv->bss_type = MWIFIEX_BSS_TYPE_STA;
756 break;
757 case NL80211_IFTYPE_P2P_CLIENT:
758 case NL80211_IFTYPE_P2P_GO:
759 priv->bss_role = MWIFIEX_BSS_ROLE_STA;
760 priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
761 break;
762 case NL80211_IFTYPE_AP:
763 priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
764 priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
765 break;
766 default:
767 dev_err(priv->adapter->dev,
768 "%s: changing to %d not supported\n",
769 dev->name, type);
770 return -EOPNOTSUPP;
771 }
735 772
736 return 0; 773 return 0;
737} 774}
738 775
776static int
777mwifiex_change_vif_to_p2p(struct net_device *dev,
778 enum nl80211_iftype curr_iftype,
779 enum nl80211_iftype type, u32 *flags,
780 struct vif_params *params)
781{
782 struct mwifiex_private *priv;
783 struct mwifiex_adapter *adapter;
784
785 priv = mwifiex_netdev_get_priv(dev);
786
787 if (!priv)
788 return -1;
789
790 adapter = priv->adapter;
791
792 if (adapter->curr_iface_comb.p2p_intf ==
793 adapter->iface_limit.p2p_intf) {
794 dev_err(adapter->dev,
795 "cannot create multiple P2P ifaces\n");
796 return -1;
797 }
798
799 dev_dbg(priv->adapter->dev, "%s: changing role to p2p\n", dev->name);
800
801 if (mwifiex_deinit_priv_params(priv))
802 return -1;
803 if (mwifiex_init_new_priv_params(priv, dev, type))
804 return -1;
805
806 switch (type) {
807 case NL80211_IFTYPE_P2P_CLIENT:
808 if (mwifiex_cfg80211_init_p2p_client(priv))
809 return -EFAULT;
810 break;
811 case NL80211_IFTYPE_P2P_GO:
812 if (mwifiex_cfg80211_init_p2p_go(priv))
813 return -EFAULT;
814 break;
815 default:
816 dev_err(priv->adapter->dev,
817 "%s: changing to %d not supported\n",
818 dev->name, type);
819 return -EOPNOTSUPP;
820 }
821
822 if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
823 HostCmd_ACT_GEN_SET, 0, NULL, true))
824 return -1;
825
826 if (mwifiex_sta_init_cmd(priv, false, false))
827 return -1;
828
829 switch (curr_iftype) {
830 case NL80211_IFTYPE_STATION:
831 case NL80211_IFTYPE_ADHOC:
832 adapter->curr_iface_comb.sta_intf--;
833 break;
834 case NL80211_IFTYPE_AP:
835 adapter->curr_iface_comb.uap_intf--;
836 break;
837 default:
838 break;
839 }
840
841 adapter->curr_iface_comb.p2p_intf++;
842 dev->ieee80211_ptr->iftype = type;
843
844 return 0;
845}
846
847static int
848mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
849 enum nl80211_iftype curr_iftype,
850 enum nl80211_iftype type, u32 *flags,
851 struct vif_params *params)
852{
853 struct mwifiex_private *priv;
854 struct mwifiex_adapter *adapter;
855
856 priv = mwifiex_netdev_get_priv(dev);
857
858 if (!priv)
859 return -1;
860
861 adapter = priv->adapter;
862
863 if ((curr_iftype != NL80211_IFTYPE_P2P_CLIENT &&
864 curr_iftype != NL80211_IFTYPE_P2P_GO) &&
865 (adapter->curr_iface_comb.sta_intf ==
866 adapter->iface_limit.sta_intf)) {
867 dev_err(adapter->dev,
868 "cannot create multiple station/adhoc ifaces\n");
869 return -1;
870 }
871
872 if (type == NL80211_IFTYPE_STATION)
873 dev_notice(adapter->dev,
874 "%s: changing role to station\n", dev->name);
875 else
876 dev_notice(adapter->dev,
877 "%s: changing role to adhoc\n", dev->name);
878
879 if (mwifiex_deinit_priv_params(priv))
880 return -1;
881 if (mwifiex_init_new_priv_params(priv, dev, type))
882 return -1;
883 if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
884 HostCmd_ACT_GEN_SET, 0, NULL, true))
885 return -1;
886 if (mwifiex_sta_init_cmd(priv, false, false))
887 return -1;
888
889 switch (curr_iftype) {
890 case NL80211_IFTYPE_P2P_CLIENT:
891 case NL80211_IFTYPE_P2P_GO:
892 adapter->curr_iface_comb.p2p_intf--;
893 break;
894 case NL80211_IFTYPE_AP:
895 adapter->curr_iface_comb.uap_intf--;
896 break;
897 default:
898 break;
899 }
900
901 adapter->curr_iface_comb.sta_intf++;
902 dev->ieee80211_ptr->iftype = type;
903 return 0;
904}
905
906static int
907mwifiex_change_vif_to_ap(struct net_device *dev,
908 enum nl80211_iftype curr_iftype,
909 enum nl80211_iftype type, u32 *flags,
910 struct vif_params *params)
911{
912 struct mwifiex_private *priv;
913 struct mwifiex_adapter *adapter;
914
915 priv = mwifiex_netdev_get_priv(dev);
916
917 if (!priv)
918 return -1;
919
920 adapter = priv->adapter;
921
922 if (adapter->curr_iface_comb.uap_intf ==
923 adapter->iface_limit.uap_intf) {
924 dev_err(adapter->dev,
925 "cannot create multiple AP ifaces\n");
926 return -1;
927 }
928
929 dev_notice(adapter->dev, "%s: changing role to AP\n", dev->name);
930
931 if (mwifiex_deinit_priv_params(priv))
932 return -1;
933 if (mwifiex_init_new_priv_params(priv, dev, type))
934 return -1;
935 if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
936 HostCmd_ACT_GEN_SET, 0, NULL, true))
937 return -1;
938 if (mwifiex_sta_init_cmd(priv, false, false))
939 return -1;
940
941 switch (curr_iftype) {
942 case NL80211_IFTYPE_P2P_CLIENT:
943 case NL80211_IFTYPE_P2P_GO:
944 adapter->curr_iface_comb.p2p_intf--;
945 break;
946 case NL80211_IFTYPE_STATION:
947 case NL80211_IFTYPE_ADHOC:
948 adapter->curr_iface_comb.sta_intf--;
949 break;
950 default:
951 break;
952 }
953
954 adapter->curr_iface_comb.uap_intf++;
955 dev->ieee80211_ptr->iftype = type;
956 return 0;
957}
739/* 958/*
740 * CFG802.11 operation handler to change interface type. 959 * CFG802.11 operation handler to change interface type.
741 */ 960 */
@@ -745,19 +964,32 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
745 enum nl80211_iftype type, u32 *flags, 964 enum nl80211_iftype type, u32 *flags,
746 struct vif_params *params) 965 struct vif_params *params)
747{ 966{
748 int ret;
749 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 967 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
968 enum nl80211_iftype curr_iftype = dev->ieee80211_ptr->iftype;
750 969
751 switch (dev->ieee80211_ptr->iftype) { 970 switch (curr_iftype) {
752 case NL80211_IFTYPE_ADHOC: 971 case NL80211_IFTYPE_ADHOC:
753 switch (type) { 972 switch (type) {
754 case NL80211_IFTYPE_STATION: 973 case NL80211_IFTYPE_STATION:
755 break; 974 priv->bss_mode = type;
975 priv->sec_info.authentication_mode =
976 NL80211_AUTHTYPE_OPEN_SYSTEM;
977 dev->ieee80211_ptr->iftype = type;
978 mwifiex_deauthenticate(priv, NULL);
979 return mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
980 HostCmd_ACT_GEN_SET, 0, NULL,
981 true);
982 case NL80211_IFTYPE_P2P_CLIENT:
983 case NL80211_IFTYPE_P2P_GO:
984 return mwifiex_change_vif_to_p2p(dev, curr_iftype,
985 type, flags, params);
986 case NL80211_IFTYPE_AP:
987 return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
988 flags, params);
756 case NL80211_IFTYPE_UNSPECIFIED: 989 case NL80211_IFTYPE_UNSPECIFIED:
757 wiphy_warn(wiphy, "%s: kept type as IBSS\n", dev->name); 990 wiphy_warn(wiphy, "%s: kept type as IBSS\n", dev->name);
758 case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */ 991 case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */
759 return 0; 992 return 0;
760 case NL80211_IFTYPE_AP:
761 default: 993 default:
762 wiphy_err(wiphy, "%s: changing to %d not supported\n", 994 wiphy_err(wiphy, "%s: changing to %d not supported\n",
763 dev->name, type); 995 dev->name, type);
@@ -767,22 +999,25 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
767 case NL80211_IFTYPE_STATION: 999 case NL80211_IFTYPE_STATION:
768 switch (type) { 1000 switch (type) {
769 case NL80211_IFTYPE_ADHOC: 1001 case NL80211_IFTYPE_ADHOC:
770 break; 1002 priv->bss_mode = type;
771 case NL80211_IFTYPE_P2P_CLIENT: 1003 priv->sec_info.authentication_mode =
772 if (mwifiex_cfg80211_init_p2p_client(priv)) 1004 NL80211_AUTHTYPE_OPEN_SYSTEM;
773 return -EFAULT;
774 dev->ieee80211_ptr->iftype = type; 1005 dev->ieee80211_ptr->iftype = type;
775 return 0; 1006 mwifiex_deauthenticate(priv, NULL);
1007 return mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
1008 HostCmd_ACT_GEN_SET, 0, NULL,
1009 true);
1010 case NL80211_IFTYPE_P2P_CLIENT:
776 case NL80211_IFTYPE_P2P_GO: 1011 case NL80211_IFTYPE_P2P_GO:
777 if (mwifiex_cfg80211_init_p2p_go(priv)) 1012 return mwifiex_change_vif_to_p2p(dev, curr_iftype,
778 return -EFAULT; 1013 type, flags, params);
779 dev->ieee80211_ptr->iftype = type; 1014 case NL80211_IFTYPE_AP:
780 return 0; 1015 return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
1016 flags, params);
781 case NL80211_IFTYPE_UNSPECIFIED: 1017 case NL80211_IFTYPE_UNSPECIFIED:
782 wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name); 1018 wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name);
783 case NL80211_IFTYPE_STATION: /* This shouldn't happen */ 1019 case NL80211_IFTYPE_STATION: /* This shouldn't happen */
784 return 0; 1020 return 0;
785 case NL80211_IFTYPE_AP:
786 default: 1021 default:
787 wiphy_err(wiphy, "%s: changing to %d not supported\n", 1022 wiphy_err(wiphy, "%s: changing to %d not supported\n",
788 dev->name, type); 1023 dev->name, type);
@@ -791,12 +1026,20 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
791 break; 1026 break;
792 case NL80211_IFTYPE_AP: 1027 case NL80211_IFTYPE_AP:
793 switch (type) { 1028 switch (type) {
1029 case NL80211_IFTYPE_ADHOC:
1030 case NL80211_IFTYPE_STATION:
1031 return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
1032 type, flags,
1033 params);
1034 break;
1035 case NL80211_IFTYPE_P2P_CLIENT:
1036 case NL80211_IFTYPE_P2P_GO:
1037 return mwifiex_change_vif_to_p2p(dev, curr_iftype,
1038 type, flags, params);
794 case NL80211_IFTYPE_UNSPECIFIED: 1039 case NL80211_IFTYPE_UNSPECIFIED:
795 wiphy_warn(wiphy, "%s: kept type as AP\n", dev->name); 1040 wiphy_warn(wiphy, "%s: kept type as AP\n", dev->name);
796 case NL80211_IFTYPE_AP: /* This shouldn't happen */ 1041 case NL80211_IFTYPE_AP: /* This shouldn't happen */
797 return 0; 1042 return 0;
798 case NL80211_IFTYPE_ADHOC:
799 case NL80211_IFTYPE_STATION:
800 default: 1043 default:
801 wiphy_err(wiphy, "%s: changing to %d not supported\n", 1044 wiphy_err(wiphy, "%s: changing to %d not supported\n",
802 dev->name, type); 1045 dev->name, type);
@@ -807,11 +1050,30 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
807 case NL80211_IFTYPE_P2P_GO: 1050 case NL80211_IFTYPE_P2P_GO:
808 switch (type) { 1051 switch (type) {
809 case NL80211_IFTYPE_STATION: 1052 case NL80211_IFTYPE_STATION:
810 if (mwifiex_cfg80211_deinit_p2p(priv)) 1053 if (mwifiex_cfg80211_init_p2p_client(priv))
811 return -EFAULT; 1054 return -EFAULT;
812 dev->ieee80211_ptr->iftype = type; 1055 dev->ieee80211_ptr->iftype = type;
1056 break;
1057 case NL80211_IFTYPE_ADHOC:
1058 if (mwifiex_cfg80211_deinit_p2p(priv))
1059 return -EFAULT;
1060 return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
1061 type, flags,
1062 params);
1063 break;
1064 case NL80211_IFTYPE_AP:
1065 if (mwifiex_cfg80211_deinit_p2p(priv))
1066 return -EFAULT;
1067 return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
1068 flags, params);
1069 case NL80211_IFTYPE_UNSPECIFIED:
1070 wiphy_warn(wiphy, "%s: kept type as P2P\n", dev->name);
1071 case NL80211_IFTYPE_P2P_CLIENT:
1072 case NL80211_IFTYPE_P2P_GO:
813 return 0; 1073 return 0;
814 default: 1074 default:
1075 wiphy_err(wiphy, "%s: changing to %d not supported\n",
1076 dev->name, type);
815 return -EOPNOTSUPP; 1077 return -EOPNOTSUPP;
816 } 1078 }
817 break; 1079 break;
@@ -821,16 +1083,8 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
821 return -EOPNOTSUPP; 1083 return -EOPNOTSUPP;
822 } 1084 }
823 1085
824 dev->ieee80211_ptr->iftype = type;
825 priv->bss_mode = type;
826 mwifiex_deauthenticate(priv, NULL);
827
828 priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
829
830 ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
831 HostCmd_ACT_GEN_SET, 0, NULL, true);
832 1086
833 return ret; 1087 return 0;
834} 1088}
835 1089
836static void 1090static void
@@ -856,16 +1110,16 @@ mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 tx_htinfo,
856 /* HT or VHT */ 1110 /* HT or VHT */
857 switch (tx_htinfo & (BIT(3) | BIT(2))) { 1111 switch (tx_htinfo & (BIT(3) | BIT(2))) {
858 case 0: 1112 case 0:
859 /* This will be 20MHz */ 1113 rate->bw = RATE_INFO_BW_20;
860 break; 1114 break;
861 case (BIT(2)): 1115 case (BIT(2)):
862 rate->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; 1116 rate->bw = RATE_INFO_BW_40;
863 break; 1117 break;
864 case (BIT(3)): 1118 case (BIT(3)):
865 rate->flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH; 1119 rate->bw = RATE_INFO_BW_80;
866 break; 1120 break;
867 case (BIT(3) | BIT(2)): 1121 case (BIT(3) | BIT(2)):
868 rate->flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH; 1122 rate->bw = RATE_INFO_BW_160;
869 break; 1123 break;
870 } 1124 }
871 1125
@@ -885,8 +1139,9 @@ mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 tx_htinfo,
885 if ((tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) { 1139 if ((tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) {
886 rate->mcs = priv->tx_rate; 1140 rate->mcs = priv->tx_rate;
887 rate->flags |= RATE_INFO_FLAGS_MCS; 1141 rate->flags |= RATE_INFO_FLAGS_MCS;
1142 rate->bw = RATE_INFO_BW_20;
888 if (tx_htinfo & BIT(1)) 1143 if (tx_htinfo & BIT(1))
889 rate->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; 1144 rate->bw = RATE_INFO_BW_40;
890 if (tx_htinfo & BIT(2)) 1145 if (tx_htinfo & BIT(2))
891 rate->flags |= RATE_INFO_FLAGS_SHORT_GI; 1146 rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
892 } 1147 }
@@ -910,10 +1165,10 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
910{ 1165{
911 u32 rate; 1166 u32 rate;
912 1167
913 sinfo->filled = STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES | 1168 sinfo->filled = BIT(NL80211_STA_INFO_RX_BYTES) | BIT(NL80211_STA_INFO_TX_BYTES) |
914 STATION_INFO_RX_PACKETS | STATION_INFO_TX_PACKETS | 1169 BIT(NL80211_STA_INFO_RX_PACKETS) | BIT(NL80211_STA_INFO_TX_PACKETS) |
915 STATION_INFO_TX_BITRATE | 1170 BIT(NL80211_STA_INFO_TX_BITRATE) |
916 STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG; 1171 BIT(NL80211_STA_INFO_SIGNAL) | BIT(NL80211_STA_INFO_SIGNAL_AVG);
917 1172
918 /* Get signal information from the firmware */ 1173 /* Get signal information from the firmware */
919 if (mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO, 1174 if (mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
@@ -944,7 +1199,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
944 sinfo->txrate.legacy = rate * 5; 1199 sinfo->txrate.legacy = rate * 5;
945 1200
946 if (priv->bss_mode == NL80211_IFTYPE_STATION) { 1201 if (priv->bss_mode == NL80211_IFTYPE_STATION) {
947 sinfo->filled |= STATION_INFO_BSS_PARAM; 1202 sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
948 sinfo->bss_param.flags = 0; 1203 sinfo->bss_param.flags = 0;
949 if (priv->curr_bss_params.bss_descriptor.cap_info_bitmap & 1204 if (priv->curr_bss_params.bss_descriptor.cap_info_bitmap &
950 WLAN_CAPABILITY_SHORT_PREAMBLE) 1205 WLAN_CAPABILITY_SHORT_PREAMBLE)
@@ -1037,10 +1292,11 @@ mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
1037 survey->channel = ieee80211_get_channel(wiphy, 1292 survey->channel = ieee80211_get_channel(wiphy,
1038 ieee80211_channel_to_frequency(pchan_stats[idx].chan_num, band)); 1293 ieee80211_channel_to_frequency(pchan_stats[idx].chan_num, band));
1039 survey->filled = SURVEY_INFO_NOISE_DBM | 1294 survey->filled = SURVEY_INFO_NOISE_DBM |
1040 SURVEY_INFO_CHANNEL_TIME | SURVEY_INFO_CHANNEL_TIME_BUSY; 1295 SURVEY_INFO_TIME |
1296 SURVEY_INFO_TIME_BUSY;
1041 survey->noise = pchan_stats[idx].noise; 1297 survey->noise = pchan_stats[idx].noise;
1042 survey->channel_time = pchan_stats[idx].cca_scan_dur; 1298 survey->time = pchan_stats[idx].cca_scan_dur;
1043 survey->channel_time_busy = pchan_stats[idx].cca_busy_dur; 1299 survey->time_busy = pchan_stats[idx].cca_busy_dur;
1044 1300
1045 return 0; 1301 return 0;
1046} 1302}
@@ -1395,10 +1651,13 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1395{ 1651{
1396 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1652 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1397 1653
1654 mwifiex_abort_cac(priv);
1655
1398 if (mwifiex_del_mgmt_ies(priv)) 1656 if (mwifiex_del_mgmt_ies(priv))
1399 wiphy_err(wiphy, "Failed to delete mgmt IEs!\n"); 1657 wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
1400 1658
1401 priv->ap_11n_enabled = 0; 1659 priv->ap_11n_enabled = 0;
1660 memset(&priv->bss_cfg, 0, sizeof(priv->bss_cfg));
1402 1661
1403 if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP, 1662 if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
1404 HostCmd_ACT_GEN_SET, 0, NULL, true)) { 1663 HostCmd_ACT_GEN_SET, 0, NULL, true)) {
@@ -1420,12 +1679,9 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
1420{ 1679{
1421 struct mwifiex_uap_bss_param *bss_cfg; 1680 struct mwifiex_uap_bss_param *bss_cfg;
1422 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1681 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1423 u8 config_bands = 0;
1424 1682
1425 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) 1683 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
1426 return -1; 1684 return -1;
1427 if (mwifiex_set_mgmt_ies(priv, &params->beacon))
1428 return -1;
1429 1685
1430 bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL); 1686 bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL);
1431 if (!bss_cfg) 1687 if (!bss_cfg)
@@ -1442,6 +1698,11 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
1442 memcpy(bss_cfg->ssid.ssid, params->ssid, params->ssid_len); 1698 memcpy(bss_cfg->ssid.ssid, params->ssid, params->ssid_len);
1443 bss_cfg->ssid.ssid_len = params->ssid_len; 1699 bss_cfg->ssid.ssid_len = params->ssid_len;
1444 } 1700 }
1701 if (params->inactivity_timeout > 0) {
1702 /* sta_ao_timer/ps_sta_ao_timer is in unit of 100ms */
1703 bss_cfg->sta_ao_timer = 10 * params->inactivity_timeout;
1704 bss_cfg->ps_sta_ao_timer = 10 * params->inactivity_timeout;
1705 }
1445 1706
1446 switch (params->hidden_ssid) { 1707 switch (params->hidden_ssid) {
1447 case NL80211_HIDDEN_SSID_NOT_IN_USE: 1708 case NL80211_HIDDEN_SSID_NOT_IN_USE:
@@ -1457,33 +1718,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
1457 return -EINVAL; 1718 return -EINVAL;
1458 } 1719 }
1459 1720
1460 bss_cfg->channel = ieee80211_frequency_to_channel( 1721 mwifiex_uap_set_channel(bss_cfg, params->chandef);
1461 params->chandef.chan->center_freq);
1462
1463 /* Set appropriate bands */
1464 if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) {
1465 bss_cfg->band_cfg = BAND_CONFIG_BG;
1466 config_bands = BAND_B | BAND_G;
1467
1468 if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
1469 config_bands |= BAND_GN;
1470 } else {
1471 bss_cfg->band_cfg = BAND_CONFIG_A;
1472 config_bands = BAND_A;
1473
1474 if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
1475 config_bands |= BAND_AN;
1476
1477 if (params->chandef.width > NL80211_CHAN_WIDTH_40)
1478 config_bands |= BAND_AAC;
1479 }
1480
1481 if (!((config_bands | priv->adapter->fw_bands) &
1482 ~priv->adapter->fw_bands))
1483 priv->adapter->config_bands = config_bands;
1484
1485 mwifiex_set_uap_rates(bss_cfg, params); 1722 mwifiex_set_uap_rates(bss_cfg, params);
1486 mwifiex_send_domain_info_cmd_fw(wiphy);
1487 1723
1488 if (mwifiex_set_secure_params(priv, bss_cfg, params)) { 1724 if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
1489 kfree(bss_cfg); 1725 kfree(bss_cfg);
@@ -1506,45 +1742,29 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
1506 1742
1507 mwifiex_set_wmm_params(priv, bss_cfg, params); 1743 mwifiex_set_wmm_params(priv, bss_cfg, params);
1508 1744
1509 if (params->inactivity_timeout > 0) { 1745 if (mwifiex_is_11h_active(priv) &&
1510 /* sta_ao_timer/ps_sta_ao_timer is in unit of 100ms */ 1746 !cfg80211_chandef_dfs_required(wiphy, &params->chandef,
1511 bss_cfg->sta_ao_timer = 10 * params->inactivity_timeout; 1747 priv->bss_mode)) {
1512 bss_cfg->ps_sta_ao_timer = 10 * params->inactivity_timeout; 1748 dev_dbg(priv->adapter->dev, "Disable 11h extensions in FW\n");
1749 if (mwifiex_11h_activate(priv, false)) {
1750 dev_err(priv->adapter->dev,
1751 "Failed to disable 11h extensions!!");
1752 return -1;
1753 }
1754 priv->state_11h.is_11h_active = true;
1513 } 1755 }
1514 1756
1515 if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP, 1757 if (mwifiex_config_start_uap(priv, bss_cfg)) {
1516 HostCmd_ACT_GEN_SET, 0, NULL, true)) { 1758 wiphy_err(wiphy, "Failed to start AP\n");
1517 wiphy_err(wiphy, "Failed to stop the BSS\n");
1518 kfree(bss_cfg); 1759 kfree(bss_cfg);
1519 return -1; 1760 return -1;
1520 } 1761 }
1521 1762
1522 if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG, 1763 if (mwifiex_set_mgmt_ies(priv, &params->beacon))
1523 HostCmd_ACT_GEN_SET,
1524 UAP_BSS_PARAMS_I, bss_cfg, false)) {
1525 wiphy_err(wiphy, "Failed to set the SSID\n");
1526 kfree(bss_cfg);
1527 return -1; 1764 return -1;
1528 }
1529 1765
1766 memcpy(&priv->bss_cfg, bss_cfg, sizeof(priv->bss_cfg));
1530 kfree(bss_cfg); 1767 kfree(bss_cfg);
1531
1532 if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
1533 HostCmd_ACT_GEN_SET, 0, NULL, false)) {
1534 wiphy_err(wiphy, "Failed to start the BSS\n");
1535 return -1;
1536 }
1537
1538 if (priv->sec_info.wep_enabled)
1539 priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
1540 else
1541 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
1542
1543 if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
1544 HostCmd_ACT_GEN_SET, 0,
1545 &priv->curr_pkt_filter, true))
1546 return -1;
1547
1548 return 0; 1768 return 0;
1549} 1769}
1550 1770
@@ -1603,15 +1823,15 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
1603 ie_len = ie_buf[1] + sizeof(struct ieee_types_header); 1823 ie_len = ie_buf[1] + sizeof(struct ieee_types_header);
1604 1824
1605 band = mwifiex_band_to_radio_type(priv->curr_bss_params.band); 1825 band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
1606 chan = __ieee80211_get_channel(priv->wdev->wiphy, 1826 chan = __ieee80211_get_channel(priv->wdev.wiphy,
1607 ieee80211_channel_to_frequency(bss_info.bss_chan, 1827 ieee80211_channel_to_frequency(bss_info.bss_chan,
1608 band)); 1828 band));
1609 1829
1610 bss = cfg80211_inform_bss(priv->wdev->wiphy, chan, 1830 bss = cfg80211_inform_bss(priv->wdev.wiphy, chan,
1611 CFG80211_BSS_FTYPE_UNKNOWN, 1831 CFG80211_BSS_FTYPE_UNKNOWN,
1612 bss_info.bssid, 0, WLAN_CAPABILITY_IBSS, 1832 bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
1613 0, ie_buf, ie_len, 0, GFP_KERNEL); 1833 0, ie_buf, ie_len, 0, GFP_KERNEL);
1614 cfg80211_put_bss(priv->wdev->wiphy, bss); 1834 cfg80211_put_bss(priv->wdev.wiphy, bss);
1615 memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN); 1835 memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
1616 1836
1617 return 0; 1837 return 0;
@@ -1732,12 +1952,12 @@ done:
1732 1952
1733 /* Find the BSS we want using available scan results */ 1953 /* Find the BSS we want using available scan results */
1734 if (mode == NL80211_IFTYPE_ADHOC) 1954 if (mode == NL80211_IFTYPE_ADHOC)
1735 bss = cfg80211_get_bss(priv->wdev->wiphy, channel, 1955 bss = cfg80211_get_bss(priv->wdev.wiphy, channel,
1736 bssid, ssid, ssid_len, 1956 bssid, ssid, ssid_len,
1737 WLAN_CAPABILITY_IBSS, 1957 WLAN_CAPABILITY_IBSS,
1738 WLAN_CAPABILITY_IBSS); 1958 WLAN_CAPABILITY_IBSS);
1739 else 1959 else
1740 bss = cfg80211_get_bss(priv->wdev->wiphy, channel, 1960 bss = cfg80211_get_bss(priv->wdev.wiphy, channel,
1741 bssid, ssid, ssid_len, 1961 bssid, ssid, ssid_len,
1742 WLAN_CAPABILITY_ESS, 1962 WLAN_CAPABILITY_ESS,
1743 WLAN_CAPABILITY_ESS); 1963 WLAN_CAPABILITY_ESS);
@@ -1784,6 +2004,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
1784 struct cfg80211_connect_params *sme) 2004 struct cfg80211_connect_params *sme)
1785{ 2005{
1786 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 2006 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
2007 struct mwifiex_adapter *adapter = priv->adapter;
1787 int ret; 2008 int ret;
1788 2009
1789 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) { 2010 if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
@@ -1793,11 +2014,18 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
1793 return -EINVAL; 2014 return -EINVAL;
1794 } 2015 }
1795 2016
1796 if (priv->wdev && priv->wdev->current_bss) { 2017 if (priv->wdev.current_bss) {
1797 wiphy_warn(wiphy, "%s: already connected\n", dev->name); 2018 wiphy_warn(wiphy, "%s: already connected\n", dev->name);
1798 return -EALREADY; 2019 return -EALREADY;
1799 } 2020 }
1800 2021
2022 if (adapter->surprise_removed || adapter->is_cmd_timedout) {
2023 wiphy_err(wiphy,
2024 "%s: Ignore connection. Card removed or FW in bad state\n",
2025 dev->name);
2026 return -EFAULT;
2027 }
2028
1801 wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n", 2029 wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
1802 (char *) sme->ssid, sme->bssid); 2030 (char *) sme->ssid, sme->bssid);
1803 2031
@@ -1844,7 +2072,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
1844static int mwifiex_set_ibss_params(struct mwifiex_private *priv, 2072static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
1845 struct cfg80211_ibss_params *params) 2073 struct cfg80211_ibss_params *params)
1846{ 2074{
1847 struct wiphy *wiphy = priv->wdev->wiphy; 2075 struct wiphy *wiphy = priv->wdev.wiphy;
1848 struct mwifiex_adapter *adapter = priv->adapter; 2076 struct mwifiex_adapter *adapter = priv->adapter;
1849 int index = 0, i; 2077 int index = 0, i;
1850 u8 config_bands = 0; 2078 u8 config_bands = 0;
@@ -2169,6 +2397,7 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
2169 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; 2397 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
2170} 2398}
2171 2399
2400#define MWIFIEX_MAX_WQ_LEN 30
2172/* 2401/*
2173 * create a new virtual interface with the given name 2402 * create a new virtual interface with the given name
2174 */ 2403 */
@@ -2182,7 +2411,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2182 struct mwifiex_private *priv; 2411 struct mwifiex_private *priv;
2183 struct net_device *dev; 2412 struct net_device *dev;
2184 void *mdev_priv; 2413 void *mdev_priv;
2185 struct wireless_dev *wdev; 2414 char dfs_cac_str[MWIFIEX_MAX_WQ_LEN], dfs_chsw_str[MWIFIEX_MAX_WQ_LEN];
2186 2415
2187 if (!adapter) 2416 if (!adapter)
2188 return ERR_PTR(-EFAULT); 2417 return ERR_PTR(-EFAULT);
@@ -2191,20 +2420,22 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2191 case NL80211_IFTYPE_UNSPECIFIED: 2420 case NL80211_IFTYPE_UNSPECIFIED:
2192 case NL80211_IFTYPE_STATION: 2421 case NL80211_IFTYPE_STATION:
2193 case NL80211_IFTYPE_ADHOC: 2422 case NL80211_IFTYPE_ADHOC:
2194 priv = adapter->priv[MWIFIEX_BSS_TYPE_STA]; 2423 if (adapter->curr_iface_comb.sta_intf ==
2195 if (priv->bss_mode) { 2424 adapter->iface_limit.sta_intf) {
2196 wiphy_err(wiphy, 2425 wiphy_err(wiphy,
2197 "cannot create multiple sta/adhoc ifaces\n"); 2426 "cannot create multiple sta/adhoc ifaces\n");
2198 return ERR_PTR(-EINVAL); 2427 return ERR_PTR(-EINVAL);
2199 } 2428 }
2200 2429
2201 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); 2430 priv = mwifiex_get_unused_priv(adapter);
2202 if (!wdev) 2431 if (!priv) {
2203 return ERR_PTR(-ENOMEM); 2432 wiphy_err(wiphy,
2433 "could not get free private struct\n");
2434 return ERR_PTR(-EFAULT);
2435 }
2204 2436
2205 wdev->wiphy = wiphy; 2437 priv->wdev.wiphy = wiphy;
2206 priv->wdev = wdev; 2438 priv->wdev.iftype = NL80211_IFTYPE_STATION;
2207 wdev->iftype = NL80211_IFTYPE_STATION;
2208 2439
2209 if (type == NL80211_IFTYPE_UNSPECIFIED) 2440 if (type == NL80211_IFTYPE_UNSPECIFIED)
2210 priv->bss_mode = NL80211_IFTYPE_STATION; 2441 priv->bss_mode = NL80211_IFTYPE_STATION;
@@ -2219,20 +2450,22 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2219 2450
2220 break; 2451 break;
2221 case NL80211_IFTYPE_AP: 2452 case NL80211_IFTYPE_AP:
2222 priv = adapter->priv[MWIFIEX_BSS_TYPE_UAP]; 2453 if (adapter->curr_iface_comb.uap_intf ==
2223 2454 adapter->iface_limit.uap_intf) {
2224 if (priv->bss_mode) { 2455 wiphy_err(wiphy,
2225 wiphy_err(wiphy, "Can't create multiple AP interfaces"); 2456 "cannot create multiple AP ifaces\n");
2226 return ERR_PTR(-EINVAL); 2457 return ERR_PTR(-EINVAL);
2227 } 2458 }
2228 2459
2229 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); 2460 priv = mwifiex_get_unused_priv(adapter);
2230 if (!wdev) 2461 if (!priv) {
2231 return ERR_PTR(-ENOMEM); 2462 wiphy_err(wiphy,
2463 "could not get free private struct\n");
2464 return ERR_PTR(-EFAULT);
2465 }
2232 2466
2233 priv->wdev = wdev; 2467 priv->wdev.wiphy = wiphy;
2234 wdev->wiphy = wiphy; 2468 priv->wdev.iftype = NL80211_IFTYPE_AP;
2235 wdev->iftype = NL80211_IFTYPE_AP;
2236 2469
2237 priv->bss_type = MWIFIEX_BSS_TYPE_UAP; 2470 priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
2238 priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II; 2471 priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
@@ -2244,24 +2477,25 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2244 2477
2245 break; 2478 break;
2246 case NL80211_IFTYPE_P2P_CLIENT: 2479 case NL80211_IFTYPE_P2P_CLIENT:
2247 priv = adapter->priv[MWIFIEX_BSS_TYPE_P2P]; 2480 if (adapter->curr_iface_comb.p2p_intf ==
2248 2481 adapter->iface_limit.p2p_intf) {
2249 if (priv->bss_mode) { 2482 wiphy_err(wiphy,
2250 wiphy_err(wiphy, "Can't create multiple P2P ifaces"); 2483 "cannot create multiple P2P ifaces\n");
2251 return ERR_PTR(-EINVAL); 2484 return ERR_PTR(-EINVAL);
2252 } 2485 }
2253 2486
2254 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); 2487 priv = mwifiex_get_unused_priv(adapter);
2255 if (!wdev) 2488 if (!priv) {
2256 return ERR_PTR(-ENOMEM); 2489 wiphy_err(wiphy,
2257 2490 "could not get free private struct\n");
2258 priv->wdev = wdev; 2491 return ERR_PTR(-EFAULT);
2259 wdev->wiphy = wiphy; 2492 }
2260 2493
2494 priv->wdev.wiphy = wiphy;
2261 /* At start-up, wpa_supplicant tries to change the interface 2495 /* At start-up, wpa_supplicant tries to change the interface
2262 * to NL80211_IFTYPE_STATION if it is not managed mode. 2496 * to NL80211_IFTYPE_STATION if it is not managed mode.
2263 */ 2497 */
2264 wdev->iftype = NL80211_IFTYPE_P2P_CLIENT; 2498 priv->wdev.iftype = NL80211_IFTYPE_P2P_CLIENT;
2265 priv->bss_mode = NL80211_IFTYPE_P2P_CLIENT; 2499 priv->bss_mode = NL80211_IFTYPE_P2P_CLIENT;
2266 2500
2267 /* Setting bss_type to P2P tells firmware that this interface 2501 /* Setting bss_type to P2P tells firmware that this interface
@@ -2277,8 +2511,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2277 priv->bss_num = 0; 2511 priv->bss_num = 0;
2278 2512
2279 if (mwifiex_cfg80211_init_p2p_client(priv)) { 2513 if (mwifiex_cfg80211_init_p2p_client(priv)) {
2280 wdev = ERR_PTR(-EFAULT); 2514 memset(&priv->wdev, 0, sizeof(priv->wdev));
2281 goto done; 2515 priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
2516 return ERR_PTR(-EFAULT);
2282 } 2517 }
2283 2518
2284 break; 2519 break;
@@ -2292,9 +2527,10 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2292 IEEE80211_NUM_ACS, 1); 2527 IEEE80211_NUM_ACS, 1);
2293 if (!dev) { 2528 if (!dev) {
2294 wiphy_err(wiphy, "no memory available for netdevice\n"); 2529 wiphy_err(wiphy, "no memory available for netdevice\n");
2530 memset(&priv->wdev, 0, sizeof(priv->wdev));
2531 priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
2295 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; 2532 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
2296 wdev = ERR_PTR(-ENOMEM); 2533 return ERR_PTR(-ENOMEM);
2297 goto done;
2298 } 2534 }
2299 2535
2300 mwifiex_init_priv_params(priv, dev); 2536 mwifiex_init_priv_params(priv, dev);
@@ -2314,7 +2550,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2314 &wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap, priv); 2550 &wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap, priv);
2315 2551
2316 dev_net_set(dev, wiphy_net(wiphy)); 2552 dev_net_set(dev, wiphy_net(wiphy));
2317 dev->ieee80211_ptr = priv->wdev; 2553 dev->ieee80211_ptr = &priv->wdev;
2318 dev->ieee80211_ptr->iftype = priv->bss_mode; 2554 dev->ieee80211_ptr->iftype = priv->bss_mode;
2319 memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN); 2555 memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN);
2320 SET_NETDEV_DEV(dev, wiphy_dev(wiphy)); 2556 SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
@@ -2335,10 +2571,47 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2335 free_netdev(dev); 2571 free_netdev(dev);
2336 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; 2572 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
2337 priv->netdev = NULL; 2573 priv->netdev = NULL;
2338 wdev = ERR_PTR(-EFAULT); 2574 memset(&priv->wdev, 0, sizeof(priv->wdev));
2339 goto done; 2575 priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
2576 return ERR_PTR(-EFAULT);
2577 }
2578
2579 strcpy(dfs_cac_str, "MWIFIEX_DFS_CAC");
2580 strcat(dfs_cac_str, name);
2581 priv->dfs_cac_workqueue = alloc_workqueue(dfs_cac_str,
2582 WQ_HIGHPRI |
2583 WQ_MEM_RECLAIM |
2584 WQ_UNBOUND, 1);
2585 if (!priv->dfs_cac_workqueue) {
2586 wiphy_err(wiphy, "cannot register virtual network device\n");
2587 free_netdev(dev);
2588 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
2589 priv->netdev = NULL;
2590 memset(&priv->wdev, 0, sizeof(priv->wdev));
2591 priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
2592 return ERR_PTR(-ENOMEM);
2340 } 2593 }
2341 2594
2595 INIT_DELAYED_WORK(&priv->dfs_cac_work, mwifiex_dfs_cac_work_queue);
2596
2597 strcpy(dfs_chsw_str, "MWIFIEX_DFS_CHSW");
2598 strcat(dfs_chsw_str, name);
2599 priv->dfs_chan_sw_workqueue = alloc_workqueue(dfs_chsw_str,
2600 WQ_HIGHPRI | WQ_UNBOUND |
2601 WQ_MEM_RECLAIM, 1);
2602 if (!priv->dfs_chan_sw_workqueue) {
2603 wiphy_err(wiphy, "cannot register virtual network device\n");
2604 free_netdev(dev);
2605 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
2606 priv->netdev = NULL;
2607 memset(&priv->wdev, 0, sizeof(priv->wdev));
2608 priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
2609 return ERR_PTR(-ENOMEM);
2610 }
2611
2612 INIT_DELAYED_WORK(&priv->dfs_chan_sw_work,
2613 mwifiex_dfs_chan_sw_work_queue);
2614
2342 sema_init(&priv->async_sem, 1); 2615 sema_init(&priv->async_sem, 1);
2343 2616
2344 dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name); 2617 dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);
@@ -2347,13 +2620,24 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2347 mwifiex_dev_debugfs_init(priv); 2620 mwifiex_dev_debugfs_init(priv);
2348#endif 2621#endif
2349 2622
2350done: 2623 switch (type) {
2351 if (IS_ERR(wdev)) { 2624 case NL80211_IFTYPE_UNSPECIFIED:
2352 kfree(priv->wdev); 2625 case NL80211_IFTYPE_STATION:
2353 priv->wdev = NULL; 2626 case NL80211_IFTYPE_ADHOC:
2627 adapter->curr_iface_comb.sta_intf++;
2628 break;
2629 case NL80211_IFTYPE_AP:
2630 adapter->curr_iface_comb.uap_intf++;
2631 break;
2632 case NL80211_IFTYPE_P2P_CLIENT:
2633 adapter->curr_iface_comb.p2p_intf++;
2634 break;
2635 default:
2636 wiphy_err(wiphy, "type not supported\n");
2637 return ERR_PTR(-EINVAL);
2354 } 2638 }
2355 2639
2356 return wdev; 2640 return &priv->wdev;
2357} 2641}
2358EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf); 2642EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
2359 2643
@@ -2363,12 +2647,13 @@ EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
2363int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) 2647int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
2364{ 2648{
2365 struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev); 2649 struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
2650 struct mwifiex_adapter *adapter = priv->adapter;
2366 2651
2367#ifdef CONFIG_DEBUG_FS 2652#ifdef CONFIG_DEBUG_FS
2368 mwifiex_dev_debugfs_remove(priv); 2653 mwifiex_dev_debugfs_remove(priv);
2369#endif 2654#endif
2370 2655
2371 mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter); 2656 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
2372 2657
2373 if (netif_carrier_ok(priv->netdev)) 2658 if (netif_carrier_ok(priv->netdev))
2374 netif_carrier_off(priv->netdev); 2659 netif_carrier_off(priv->netdev);
@@ -2376,16 +2661,48 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
2376 if (wdev->netdev->reg_state == NETREG_REGISTERED) 2661 if (wdev->netdev->reg_state == NETREG_REGISTERED)
2377 unregister_netdevice(wdev->netdev); 2662 unregister_netdevice(wdev->netdev);
2378 2663
2664 if (priv->dfs_cac_workqueue) {
2665 flush_workqueue(priv->dfs_cac_workqueue);
2666 destroy_workqueue(priv->dfs_cac_workqueue);
2667 priv->dfs_cac_workqueue = NULL;
2668 }
2669
2670 if (priv->dfs_chan_sw_workqueue) {
2671 flush_workqueue(priv->dfs_chan_sw_workqueue);
2672 destroy_workqueue(priv->dfs_chan_sw_workqueue);
2673 priv->dfs_chan_sw_workqueue = NULL;
2674 }
2379 /* Clear the priv in adapter */ 2675 /* Clear the priv in adapter */
2380 priv->netdev->ieee80211_ptr = NULL; 2676 priv->netdev->ieee80211_ptr = NULL;
2381 priv->netdev = NULL; 2677 priv->netdev = NULL;
2382 kfree(wdev); 2678 priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
2383 priv->wdev = NULL;
2384 2679
2385 priv->media_connected = false; 2680 priv->media_connected = false;
2386 2681
2682 switch (priv->bss_mode) {
2683 case NL80211_IFTYPE_UNSPECIFIED:
2684 case NL80211_IFTYPE_STATION:
2685 case NL80211_IFTYPE_ADHOC:
2686 adapter->curr_iface_comb.sta_intf++;
2687 break;
2688 case NL80211_IFTYPE_AP:
2689 adapter->curr_iface_comb.uap_intf++;
2690 break;
2691 case NL80211_IFTYPE_P2P_CLIENT:
2692 case NL80211_IFTYPE_P2P_GO:
2693 adapter->curr_iface_comb.p2p_intf++;
2694 break;
2695 default:
2696 dev_err(adapter->dev, "del_virtual_intf: type not supported\n");
2697 break;
2698 }
2699
2387 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; 2700 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
2388 2701
2702 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
2703 GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
2704 kfree(priv->hist_data);
2705
2389 return 0; 2706 return 0;
2390} 2707}
2391EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf); 2708EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf);
@@ -2421,30 +2738,16 @@ mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq,
2421} 2738}
2422 2739
2423#ifdef CONFIG_PM 2740#ifdef CONFIG_PM
2424static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, 2741static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
2425 struct cfg80211_wowlan *wowlan) 2742 struct cfg80211_wowlan *wowlan)
2426{ 2743{
2427 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); 2744 int i, filt_num = 0, ret = 0;
2428 struct mwifiex_ds_mef_cfg mef_cfg;
2429 struct mwifiex_mef_entry *mef_entry;
2430 int i, filt_num = 0, ret;
2431 bool first_pat = true; 2745 bool first_pat = true;
2432 u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1]; 2746 u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
2433 const u8 ipv4_mc_mac[] = {0x33, 0x33}; 2747 const u8 ipv4_mc_mac[] = {0x33, 0x33};
2434 const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e}; 2748 const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
2435 struct mwifiex_private *priv = 2749 struct mwifiex_ds_mef_cfg mef_cfg;
2436 mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); 2750 struct mwifiex_mef_entry *mef_entry;
2437
2438 if (!wowlan) {
2439 dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
2440 return 0;
2441 }
2442
2443 if (!priv->media_connected) {
2444 dev_warn(adapter->dev,
2445 "Can not configure WOWLAN in disconnected state\n");
2446 return 0;
2447 }
2448 2751
2449 mef_entry = kzalloc(sizeof(*mef_entry), GFP_KERNEL); 2752 mef_entry = kzalloc(sizeof(*mef_entry), GFP_KERNEL);
2450 if (!mef_entry) 2753 if (!mef_entry)
@@ -2459,9 +2762,9 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2459 for (i = 0; i < wowlan->n_patterns; i++) { 2762 for (i = 0; i < wowlan->n_patterns; i++) {
2460 memset(byte_seq, 0, sizeof(byte_seq)); 2763 memset(byte_seq, 0, sizeof(byte_seq));
2461 if (!mwifiex_is_pattern_supported(&wowlan->patterns[i], 2764 if (!mwifiex_is_pattern_supported(&wowlan->patterns[i],
2462 byte_seq, 2765 byte_seq,
2463 MWIFIEX_MEF_MAX_BYTESEQ)) { 2766 MWIFIEX_MEF_MAX_BYTESEQ)) {
2464 wiphy_err(wiphy, "Pattern not supported\n"); 2767 dev_err(priv->adapter->dev, "Pattern not supported\n");
2465 kfree(mef_entry); 2768 kfree(mef_entry);
2466 return -EOPNOTSUPP; 2769 return -EOPNOTSUPP;
2467 } 2770 }
@@ -2485,9 +2788,9 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2485 2788
2486 mef_entry->filter[filt_num].repeat = 1; 2789 mef_entry->filter[filt_num].repeat = 1;
2487 mef_entry->filter[filt_num].offset = 2790 mef_entry->filter[filt_num].offset =
2488 wowlan->patterns[i].pkt_offset; 2791 wowlan->patterns[i].pkt_offset;
2489 memcpy(mef_entry->filter[filt_num].byte_seq, byte_seq, 2792 memcpy(mef_entry->filter[filt_num].byte_seq, byte_seq,
2490 sizeof(byte_seq)); 2793 sizeof(byte_seq));
2491 mef_entry->filter[filt_num].filt_type = TYPE_EQ; 2794 mef_entry->filter[filt_num].filt_type = TYPE_EQ;
2492 2795
2493 if (first_pat) 2796 if (first_pat)
@@ -2502,9 +2805,9 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2502 mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST; 2805 mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
2503 mef_entry->filter[filt_num].repeat = 16; 2806 mef_entry->filter[filt_num].repeat = 16;
2504 memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr, 2807 memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
2505 ETH_ALEN); 2808 ETH_ALEN);
2506 mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] = 2809 mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
2507 ETH_ALEN; 2810 ETH_ALEN;
2508 mef_entry->filter[filt_num].offset = 28; 2811 mef_entry->filter[filt_num].offset = 28;
2509 mef_entry->filter[filt_num].filt_type = TYPE_EQ; 2812 mef_entry->filter[filt_num].filt_type = TYPE_EQ;
2510 if (filt_num) 2813 if (filt_num)
@@ -2513,9 +2816,9 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2513 filt_num++; 2816 filt_num++;
2514 mef_entry->filter[filt_num].repeat = 16; 2817 mef_entry->filter[filt_num].repeat = 16;
2515 memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr, 2818 memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
2516 ETH_ALEN); 2819 ETH_ALEN);
2517 mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] = 2820 mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
2518 ETH_ALEN; 2821 ETH_ALEN;
2519 mef_entry->filter[filt_num].offset = 56; 2822 mef_entry->filter[filt_num].offset = 56;
2520 mef_entry->filter[filt_num].filt_type = TYPE_EQ; 2823 mef_entry->filter[filt_num].filt_type = TYPE_EQ;
2521 mef_entry->filter[filt_num].filt_action = TYPE_OR; 2824 mef_entry->filter[filt_num].filt_action = TYPE_OR;
@@ -2523,16 +2826,61 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2523 2826
2524 if (!mef_cfg.criteria) 2827 if (!mef_cfg.criteria)
2525 mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST | 2828 mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
2526 MWIFIEX_CRITERIA_UNICAST | 2829 MWIFIEX_CRITERIA_UNICAST |
2527 MWIFIEX_CRITERIA_MULTICAST; 2830 MWIFIEX_CRITERIA_MULTICAST;
2528 2831
2529 ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG, 2832 ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
2530 HostCmd_ACT_GEN_SET, 0, &mef_cfg, true); 2833 HostCmd_ACT_GEN_SET, 0, &mef_cfg, true);
2531 2834
2532 kfree(mef_entry); 2835 kfree(mef_entry);
2533 return ret; 2836 return ret;
2534} 2837}
2535 2838
2839static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2840 struct cfg80211_wowlan *wowlan)
2841{
2842 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
2843 struct mwifiex_ds_hs_cfg hs_cfg;
2844 int ret = 0;
2845 struct mwifiex_private *priv =
2846 mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
2847
2848 if (!wowlan) {
2849 dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
2850 return 0;
2851 }
2852
2853 if (!priv->media_connected) {
2854 dev_warn(adapter->dev,
2855 "Can not configure WOWLAN in disconnected state\n");
2856 return 0;
2857 }
2858
2859 if (wowlan->n_patterns || wowlan->magic_pkt) {
2860 ret = mwifiex_set_mef_filter(priv, wowlan);
2861 if (ret) {
2862 dev_err(adapter->dev, "Failed to set MEF filter\n");
2863 return ret;
2864 }
2865 }
2866
2867 if (wowlan->disconnect) {
2868 memset(&hs_cfg, 0, sizeof(hs_cfg));
2869 hs_cfg.is_invoke_hostcmd = false;
2870 hs_cfg.conditions = HS_CFG_COND_MAC_EVENT;
2871 hs_cfg.gpio = HS_CFG_GPIO_DEF;
2872 hs_cfg.gap = HS_CFG_GAP_DEF;
2873 ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
2874 MWIFIEX_SYNC_CMD, &hs_cfg);
2875 if (ret) {
2876 dev_err(adapter->dev, "Failed to set HS params\n");
2877 return ret;
2878 }
2879 }
2880
2881 return ret;
2882}
2883
2536static int mwifiex_cfg80211_resume(struct wiphy *wiphy) 2884static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
2537{ 2885{
2538 return 0; 2886 return 0;
@@ -2803,6 +3151,102 @@ mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
2803} 3151}
2804 3152
2805static int 3153static int
3154mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
3155 struct cfg80211_csa_settings *params)
3156{
3157 struct ieee_types_header *chsw_ie;
3158 struct ieee80211_channel_sw_ie *channel_sw;
3159 int chsw_msec;
3160 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
3161
3162 if (priv->adapter->scan_processing) {
3163 dev_err(priv->adapter->dev,
3164 "radar detection: scan in process...\n");
3165 return -EBUSY;
3166 }
3167
3168 if (priv->wdev.cac_started)
3169 return -EBUSY;
3170
3171 if (cfg80211_chandef_identical(&params->chandef,
3172 &priv->dfs_chandef))
3173 return -EINVAL;
3174
3175 chsw_ie = (void *)cfg80211_find_ie(WLAN_EID_CHANNEL_SWITCH,
3176 params->beacon_csa.tail,
3177 params->beacon_csa.tail_len);
3178 if (!chsw_ie) {
3179 dev_err(priv->adapter->dev,
3180 "Could not parse channel switch announcement IE\n");
3181 return -EINVAL;
3182 }
3183
3184 channel_sw = (void *)(chsw_ie + 1);
3185 if (channel_sw->mode) {
3186 if (netif_carrier_ok(priv->netdev))
3187 netif_carrier_off(priv->netdev);
3188 mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
3189 }
3190
3191 if (mwifiex_del_mgmt_ies(priv))
3192 wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
3193
3194 if (mwifiex_set_mgmt_ies(priv, &params->beacon_csa)) {
3195 wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__);
3196 return -EFAULT;
3197 }
3198
3199 memcpy(&priv->dfs_chandef, &params->chandef, sizeof(priv->dfs_chandef));
3200 memcpy(&priv->beacon_after, &params->beacon_after,
3201 sizeof(priv->beacon_after));
3202
3203 chsw_msec = max(channel_sw->count * priv->bss_cfg.beacon_period, 100);
3204 queue_delayed_work(priv->dfs_chan_sw_workqueue, &priv->dfs_chan_sw_work,
3205 msecs_to_jiffies(chsw_msec));
3206 return 0;
3207}
3208
3209static int
3210mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy,
3211 struct net_device *dev,
3212 struct cfg80211_chan_def *chandef,
3213 u32 cac_time_ms)
3214{
3215 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
3216 struct mwifiex_radar_params radar_params;
3217
3218 if (priv->adapter->scan_processing) {
3219 dev_err(priv->adapter->dev,
3220 "radar detection: scan already in process...\n");
3221 return -EBUSY;
3222 }
3223
3224 if (!mwifiex_is_11h_active(priv)) {
3225 dev_dbg(priv->adapter->dev, "Enable 11h extensions in FW\n");
3226 if (mwifiex_11h_activate(priv, true)) {
3227 dev_err(priv->adapter->dev,
3228 "Failed to activate 11h extensions!!");
3229 return -1;
3230 }
3231 priv->state_11h.is_11h_active = true;
3232 }
3233
3234 memset(&radar_params, 0, sizeof(struct mwifiex_radar_params));
3235 radar_params.chandef = chandef;
3236 radar_params.cac_time_ms = cac_time_ms;
3237
3238 memcpy(&priv->dfs_chandef, chandef, sizeof(priv->dfs_chandef));
3239
3240 if (mwifiex_send_cmd(priv, HostCmd_CMD_CHAN_REPORT_REQUEST,
3241 HostCmd_ACT_GEN_SET, 0, &radar_params, true))
3242 return -1;
3243
3244 queue_delayed_work(priv->dfs_cac_workqueue, &priv->dfs_cac_work,
3245 msecs_to_jiffies(cac_time_ms));
3246 return 0;
3247}
3248
3249static int
2806mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev, 3250mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev,
2807 const u8 *mac, 3251 const u8 *mac,
2808 struct station_parameters *params) 3252 struct station_parameters *params)
@@ -2866,11 +3310,13 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
2866 .tdls_oper = mwifiex_cfg80211_tdls_oper, 3310 .tdls_oper = mwifiex_cfg80211_tdls_oper,
2867 .add_station = mwifiex_cfg80211_add_station, 3311 .add_station = mwifiex_cfg80211_add_station,
2868 .change_station = mwifiex_cfg80211_change_station, 3312 .change_station = mwifiex_cfg80211_change_station,
3313 .start_radar_detection = mwifiex_cfg80211_start_radar_detection,
3314 .channel_switch = mwifiex_cfg80211_channel_switch,
2869}; 3315};
2870 3316
2871#ifdef CONFIG_PM 3317#ifdef CONFIG_PM
2872static const struct wiphy_wowlan_support mwifiex_wowlan_support = { 3318static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
2873 .flags = WIPHY_WOWLAN_MAGIC_PKT, 3319 .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
2874 .n_patterns = MWIFIEX_MEF_MAX_FILTERS, 3320 .n_patterns = MWIFIEX_MEF_MAX_FILTERS,
2875 .pattern_min_len = 1, 3321 .pattern_min_len = 1,
2876 .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, 3322 .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
@@ -2964,12 +3410,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
2964 wiphy->cipher_suites = mwifiex_cipher_suites; 3410 wiphy->cipher_suites = mwifiex_cipher_suites;
2965 wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites); 3411 wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites);
2966 3412
2967 memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN); 3413 ether_addr_copy(wiphy->perm_addr, adapter->perm_addr);
2968 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 3414 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
2969 wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | 3415 wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
2970 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | 3416 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
2971 WIPHY_FLAG_AP_UAPSD | 3417 WIPHY_FLAG_AP_UAPSD |
2972 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 3418 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
3419 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
2973 3420
2974 if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) 3421 if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
2975 wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | 3422 wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index b8242eb2be6f..e9df8826f124 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -322,9 +322,9 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
322 return cfp; 322 return cfp;
323 323
324 if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG) 324 if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG)
325 sband = priv->wdev->wiphy->bands[IEEE80211_BAND_2GHZ]; 325 sband = priv->wdev.wiphy->bands[IEEE80211_BAND_2GHZ];
326 else 326 else
327 sband = priv->wdev->wiphy->bands[IEEE80211_BAND_5GHZ]; 327 sband = priv->wdev.wiphy->bands[IEEE80211_BAND_5GHZ];
328 328
329 if (!sband) { 329 if (!sband) {
330 dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d\n", 330 dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d\n",
@@ -509,3 +509,21 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
509 509
510 return k; 510 return k;
511} 511}
512
513u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
514 u8 rx_rate, u8 rate_info)
515{
516 u8 rate_index = 0;
517
518 /* HT40 */
519 if ((rate_info & BIT(0)) && (rate_info & BIT(1)))
520 rate_index = MWIFIEX_RATE_INDEX_MCS0 +
521 MWIFIEX_BW20_MCS_NUM + rx_rate;
522 else if (rate_info & BIT(0)) /* HT20 */
523 rate_index = MWIFIEX_RATE_INDEX_MCS0 + rx_rate;
524 else
525 rate_index = (rx_rate > MWIFIEX_RATE_INDEX_OFDM0) ?
526 rx_rate - 1 : rx_rate;
527
528 return rate_index;
529}
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 85597200badc..c5a14ff7eb82 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -315,22 +315,19 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
315 adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++; 315 adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++;
316 return -1; 316 return -1;
317 } 317 }
318 if (GET_BSS_ROLE(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY)) 318
319 == MWIFIEX_BSS_ROLE_STA) { 319 if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl))
320 if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl)) 320 /* Response is not needed for sleep confirm command */
321 /* Response is not needed for sleep 321 adapter->ps_state = PS_STATE_SLEEP;
322 confirm command */ 322 else
323 adapter->ps_state = PS_STATE_SLEEP; 323 adapter->ps_state = PS_STATE_SLEEP_CFM;
324 else 324
325 adapter->ps_state = PS_STATE_SLEEP_CFM; 325 if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) &&
326 326 (adapter->is_hs_configured &&
327 if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) && 327 !adapter->sleep_period.period)) {
328 (adapter->is_hs_configured && 328 adapter->pm_wakeup_card_req = true;
329 !adapter->sleep_period.period)) { 329 mwifiex_hs_activated_event(mwifiex_get_priv
330 adapter->pm_wakeup_card_req = true; 330 (adapter, MWIFIEX_BSS_ROLE_ANY), true);
331 mwifiex_hs_activated_event(mwifiex_get_priv
332 (adapter, MWIFIEX_BSS_ROLE_STA), true);
333 }
334 } 331 }
335 332
336 return ret; 333 return ret;
@@ -450,6 +447,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
450 EVENT_GET_BSS_TYPE(eventcause)); 447 EVENT_GET_BSS_TYPE(eventcause));
451 if (!priv) 448 if (!priv)
452 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); 449 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
450
453 /* Clear BSS_NO_BITS from event */ 451 /* Clear BSS_NO_BITS from event */
454 eventcause &= EVENT_ID_MASK; 452 eventcause &= EVENT_ID_MASK;
455 adapter->event_cause = eventcause; 453 adapter->event_cause = eventcause;
@@ -462,12 +460,6 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
462 } 460 }
463 461
464 dev_dbg(adapter->dev, "EVENT: cause: %#x\n", eventcause); 462 dev_dbg(adapter->dev, "EVENT: cause: %#x\n", eventcause);
465 if (eventcause == EVENT_PS_SLEEP || eventcause == EVENT_PS_AWAKE) {
466 /* Handle PS_SLEEP/AWAKE events on STA */
467 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
468 if (!priv)
469 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
470 }
471 463
472 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) 464 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
473 ret = mwifiex_process_uap_event(priv); 465 ret = mwifiex_process_uap_event(priv);
@@ -1008,11 +1000,9 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
1008 list_for_each_entry_safe(cmd_node, tmp_node, 1000 list_for_each_entry_safe(cmd_node, tmp_node,
1009 &adapter->scan_pending_q, list) { 1001 &adapter->scan_pending_q, list) {
1010 list_del(&cmd_node->list); 1002 list_del(&cmd_node->list);
1011 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
1012 1003
1013 cmd_node->wait_q_enabled = false; 1004 cmd_node->wait_q_enabled = false;
1014 mwifiex_insert_cmd_to_free_q(adapter, cmd_node); 1005 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
1015 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
1016 } 1006 }
1017 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); 1007 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
1018 1008
@@ -1070,12 +1060,8 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
1070 list_for_each_entry_safe(cmd_node, tmp_node, 1060 list_for_each_entry_safe(cmd_node, tmp_node,
1071 &adapter->scan_pending_q, list) { 1061 &adapter->scan_pending_q, list) {
1072 list_del(&cmd_node->list); 1062 list_del(&cmd_node->list);
1073 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1074 scan_pending_q_flags);
1075 cmd_node->wait_q_enabled = false; 1063 cmd_node->wait_q_enabled = false;
1076 mwifiex_insert_cmd_to_free_q(adapter, cmd_node); 1064 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
1077 spin_lock_irqsave(&adapter->scan_pending_q_lock,
1078 scan_pending_q_flags);
1079 } 1065 }
1080 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, 1066 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1081 scan_pending_q_flags); 1067 scan_pending_q_flags);
@@ -1588,9 +1574,7 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
1588 le16_to_cpu(hw_spec->hw_if_version), 1574 le16_to_cpu(hw_spec->hw_if_version),
1589 le16_to_cpu(hw_spec->version)); 1575 le16_to_cpu(hw_spec->version));
1590 1576
1591 if (priv->curr_addr[0] == 0xff) 1577 ether_addr_copy(priv->adapter->perm_addr, hw_spec->permanent_addr);
1592 memmove(priv->curr_addr, hw_spec->permanent_addr, ETH_ALEN);
1593
1594 adapter->region_code = le16_to_cpu(hw_spec->region_code); 1578 adapter->region_code = le16_to_cpu(hw_spec->region_code);
1595 1579
1596 for (i = 0; i < MWIFIEX_MAX_REGION_CODE; i++) 1580 for (i = 0; i < MWIFIEX_MAX_REGION_CODE; i++)
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 2713f7acd35e..1fb329dc6744 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -39,111 +39,6 @@ static char *bss_modes[] = {
39 "P2P_DEVICE", 39 "P2P_DEVICE",
40}; 40};
41 41
42/* size/addr for mwifiex_debug_info */
43#define item_size(n) (FIELD_SIZEOF(struct mwifiex_debug_info, n))
44#define item_addr(n) (offsetof(struct mwifiex_debug_info, n))
45
46/* size/addr for struct mwifiex_adapter */
47#define adapter_item_size(n) (FIELD_SIZEOF(struct mwifiex_adapter, n))
48#define adapter_item_addr(n) (offsetof(struct mwifiex_adapter, n))
49
50struct mwifiex_debug_data {
51 char name[32]; /* variable/array name */
52 u32 size; /* size of the variable/array */
53 size_t addr; /* address of the variable/array */
54 int num; /* number of variables in an array */
55};
56
57static struct mwifiex_debug_data items[] = {
58 {"int_counter", item_size(int_counter),
59 item_addr(int_counter), 1},
60 {"wmm_ac_vo", item_size(packets_out[WMM_AC_VO]),
61 item_addr(packets_out[WMM_AC_VO]), 1},
62 {"wmm_ac_vi", item_size(packets_out[WMM_AC_VI]),
63 item_addr(packets_out[WMM_AC_VI]), 1},
64 {"wmm_ac_be", item_size(packets_out[WMM_AC_BE]),
65 item_addr(packets_out[WMM_AC_BE]), 1},
66 {"wmm_ac_bk", item_size(packets_out[WMM_AC_BK]),
67 item_addr(packets_out[WMM_AC_BK]), 1},
68 {"tx_buf_size", item_size(tx_buf_size),
69 item_addr(tx_buf_size), 1},
70 {"curr_tx_buf_size", item_size(curr_tx_buf_size),
71 item_addr(curr_tx_buf_size), 1},
72 {"ps_mode", item_size(ps_mode),
73 item_addr(ps_mode), 1},
74 {"ps_state", item_size(ps_state),
75 item_addr(ps_state), 1},
76 {"is_deep_sleep", item_size(is_deep_sleep),
77 item_addr(is_deep_sleep), 1},
78 {"wakeup_dev_req", item_size(pm_wakeup_card_req),
79 item_addr(pm_wakeup_card_req), 1},
80 {"wakeup_tries", item_size(pm_wakeup_fw_try),
81 item_addr(pm_wakeup_fw_try), 1},
82 {"hs_configured", item_size(is_hs_configured),
83 item_addr(is_hs_configured), 1},
84 {"hs_activated", item_size(hs_activated),
85 item_addr(hs_activated), 1},
86 {"num_tx_timeout", item_size(num_tx_timeout),
87 item_addr(num_tx_timeout), 1},
88 {"is_cmd_timedout", item_size(is_cmd_timedout),
89 item_addr(is_cmd_timedout), 1},
90 {"timeout_cmd_id", item_size(timeout_cmd_id),
91 item_addr(timeout_cmd_id), 1},
92 {"timeout_cmd_act", item_size(timeout_cmd_act),
93 item_addr(timeout_cmd_act), 1},
94 {"last_cmd_id", item_size(last_cmd_id),
95 item_addr(last_cmd_id), DBG_CMD_NUM},
96 {"last_cmd_act", item_size(last_cmd_act),
97 item_addr(last_cmd_act), DBG_CMD_NUM},
98 {"last_cmd_index", item_size(last_cmd_index),
99 item_addr(last_cmd_index), 1},
100 {"last_cmd_resp_id", item_size(last_cmd_resp_id),
101 item_addr(last_cmd_resp_id), DBG_CMD_NUM},
102 {"last_cmd_resp_index", item_size(last_cmd_resp_index),
103 item_addr(last_cmd_resp_index), 1},
104 {"last_event", item_size(last_event),
105 item_addr(last_event), DBG_CMD_NUM},
106 {"last_event_index", item_size(last_event_index),
107 item_addr(last_event_index), 1},
108 {"num_cmd_h2c_fail", item_size(num_cmd_host_to_card_failure),
109 item_addr(num_cmd_host_to_card_failure), 1},
110 {"num_cmd_sleep_cfm_fail",
111 item_size(num_cmd_sleep_cfm_host_to_card_failure),
112 item_addr(num_cmd_sleep_cfm_host_to_card_failure), 1},
113 {"num_tx_h2c_fail", item_size(num_tx_host_to_card_failure),
114 item_addr(num_tx_host_to_card_failure), 1},
115 {"num_evt_deauth", item_size(num_event_deauth),
116 item_addr(num_event_deauth), 1},
117 {"num_evt_disassoc", item_size(num_event_disassoc),
118 item_addr(num_event_disassoc), 1},
119 {"num_evt_link_lost", item_size(num_event_link_lost),
120 item_addr(num_event_link_lost), 1},
121 {"num_cmd_deauth", item_size(num_cmd_deauth),
122 item_addr(num_cmd_deauth), 1},
123 {"num_cmd_assoc_ok", item_size(num_cmd_assoc_success),
124 item_addr(num_cmd_assoc_success), 1},
125 {"num_cmd_assoc_fail", item_size(num_cmd_assoc_failure),
126 item_addr(num_cmd_assoc_failure), 1},
127 {"cmd_sent", item_size(cmd_sent),
128 item_addr(cmd_sent), 1},
129 {"data_sent", item_size(data_sent),
130 item_addr(data_sent), 1},
131 {"cmd_resp_received", item_size(cmd_resp_received),
132 item_addr(cmd_resp_received), 1},
133 {"event_received", item_size(event_received),
134 item_addr(event_received), 1},
135
136 /* variables defined in struct mwifiex_adapter */
137 {"cmd_pending", adapter_item_size(cmd_pending),
138 adapter_item_addr(cmd_pending), 1},
139 {"tx_pending", adapter_item_size(tx_pending),
140 adapter_item_addr(tx_pending), 1},
141 {"rx_pending", adapter_item_size(rx_pending),
142 adapter_item_addr(rx_pending), 1},
143};
144
145static int num_of_items = ARRAY_SIZE(items);
146
147/* 42/*
148 * Proc info file read handler. 43 * Proc info file read handler.
149 * 44 *
@@ -297,6 +192,8 @@ mwifiex_fw_dump_read(struct file *file, char __user *ubuf,
297 * - Number of FCS errors 192 * - Number of FCS errors
298 * - Number of Tx frames 193 * - Number of Tx frames
299 * - WEP ICV error counts 194 * - WEP ICV error counts
195 * - Number of received beacons
196 * - Number of missed beacons
300 */ 197 */
301static ssize_t 198static ssize_t
302mwifiex_getlog_read(struct file *file, char __user *ubuf, 199mwifiex_getlog_read(struct file *file, char __user *ubuf,
@@ -333,7 +230,9 @@ mwifiex_getlog_read(struct file *file, char __user *ubuf,
333 "wepicverrcnt-1 %u\n" 230 "wepicverrcnt-1 %u\n"
334 "wepicverrcnt-2 %u\n" 231 "wepicverrcnt-2 %u\n"
335 "wepicverrcnt-3 %u\n" 232 "wepicverrcnt-3 %u\n"
336 "wepicverrcnt-4 %u\n", 233 "wepicverrcnt-4 %u\n"
234 "bcn_rcv_cnt %u\n"
235 "bcn_miss_cnt %u\n",
337 stats.mcast_tx_frame, 236 stats.mcast_tx_frame,
338 stats.failed, 237 stats.failed,
339 stats.retry, 238 stats.retry,
@@ -349,7 +248,9 @@ mwifiex_getlog_read(struct file *file, char __user *ubuf,
349 stats.wep_icv_error[0], 248 stats.wep_icv_error[0],
350 stats.wep_icv_error[1], 249 stats.wep_icv_error[1],
351 stats.wep_icv_error[2], 250 stats.wep_icv_error[2],
352 stats.wep_icv_error[3]); 251 stats.wep_icv_error[3],
252 stats.bcn_rcv_cnt,
253 stats.bcn_miss_cnt);
353 254
354 255
355 ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page, 256 ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
@@ -360,6 +261,103 @@ free_and_exit:
360 return ret; 261 return ret;
361} 262}
362 263
264/* Sysfs histogram file read handler.
265 *
266 * This function is called when the 'histogram' file is opened for reading
267 * It prints the following histogram information -
268 * - Number of histogram samples
269 * - Receive packet number of each rx_rate
270 * - Receive packet number of each snr
271 * - Receive packet number of each nosie_flr
272 * - Receive packet number of each signal streath
273 */
274static ssize_t
275mwifiex_histogram_read(struct file *file, char __user *ubuf,
276 size_t count, loff_t *ppos)
277{
278 struct mwifiex_private *priv =
279 (struct mwifiex_private *)file->private_data;
280 ssize_t ret;
281 struct mwifiex_histogram_data *phist_data;
282 int i, value;
283 unsigned long page = get_zeroed_page(GFP_KERNEL);
284 char *p = (char *)page;
285
286 if (!p)
287 return -ENOMEM;
288
289 if (!priv || !priv->hist_data)
290 return -EFAULT;
291 phist_data = priv->hist_data;
292
293 p += sprintf(p, "\n"
294 "total samples = %d\n",
295 atomic_read(&phist_data->num_samples));
296
297 p += sprintf(p, "rx rates (in Mbps): 0=1M 1=2M");
298 p += sprintf(p, "2=5.5M 3=11M 4=6M 5=9M 6=12M\n");
299 p += sprintf(p, "7=18M 8=24M 9=36M 10=48M 11=54M");
300 p += sprintf(p, "12-27=MCS0-15(BW20) 28-43=MCS0-15(BW40)\n");
301
302 if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info)) {
303 p += sprintf(p, "44-53=MCS0-9(VHT:BW20)");
304 p += sprintf(p, "54-63=MCS0-9(VHT:BW40)");
305 p += sprintf(p, "64-73=MCS0-9(VHT:BW80)\n\n");
306 } else {
307 p += sprintf(p, "\n");
308 }
309
310 for (i = 0; i < MWIFIEX_MAX_RX_RATES; i++) {
311 value = atomic_read(&phist_data->rx_rate[i]);
312 if (value)
313 p += sprintf(p, "rx_rate[%02d] = %d\n", i, value);
314 }
315
316 if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info)) {
317 for (i = MWIFIEX_MAX_RX_RATES; i < MWIFIEX_MAX_AC_RX_RATES;
318 i++) {
319 value = atomic_read(&phist_data->rx_rate[i]);
320 if (value)
321 p += sprintf(p, "rx_rate[%02d] = %d\n",
322 i, value);
323 }
324 }
325
326 for (i = 0; i < MWIFIEX_MAX_SNR; i++) {
327 value = atomic_read(&phist_data->snr[i]);
328 if (value)
329 p += sprintf(p, "snr[%02ddB] = %d\n", i, value);
330 }
331 for (i = 0; i < MWIFIEX_MAX_NOISE_FLR; i++) {
332 value = atomic_read(&phist_data->noise_flr[i]);
333 if (value)
334 p += sprintf(p, "noise_flr[-%02ddBm] = %d\n",
335 (int)(i-128), value);
336 }
337 for (i = 0; i < MWIFIEX_MAX_SIG_STRENGTH; i++) {
338 value = atomic_read(&phist_data->sig_str[i]);
339 if (value)
340 p += sprintf(p, "sig_strength[-%02ddBm] = %d\n",
341 i, value);
342 }
343
344 ret = simple_read_from_buffer(ubuf, count, ppos, (char *)page,
345 (unsigned long)p - page);
346
347 return ret;
348}
349
350static ssize_t
351mwifiex_histogram_write(struct file *file, const char __user *ubuf,
352 size_t count, loff_t *ppos)
353{
354 struct mwifiex_private *priv = (void *)file->private_data;
355
356 if (priv && priv->hist_data)
357 mwifiex_hist_data_reset(priv);
358 return 0;
359}
360
363static struct mwifiex_debug_info info; 361static struct mwifiex_debug_info info;
364 362
365/* 363/*
@@ -415,13 +413,9 @@ mwifiex_debug_read(struct file *file, char __user *ubuf,
415{ 413{
416 struct mwifiex_private *priv = 414 struct mwifiex_private *priv =
417 (struct mwifiex_private *) file->private_data; 415 (struct mwifiex_private *) file->private_data;
418 struct mwifiex_debug_data *d = &items[0];
419 unsigned long page = get_zeroed_page(GFP_KERNEL); 416 unsigned long page = get_zeroed_page(GFP_KERNEL);
420 char *p = (char *) page; 417 char *p = (char *) page;
421 ssize_t ret; 418 ssize_t ret;
422 size_t size, addr;
423 long val;
424 int i, j;
425 419
426 if (!p) 420 if (!p)
427 return -ENOMEM; 421 return -ENOMEM;
@@ -430,68 +424,7 @@ mwifiex_debug_read(struct file *file, char __user *ubuf,
430 if (ret) 424 if (ret)
431 goto free_and_exit; 425 goto free_and_exit;
432 426
433 for (i = 0; i < num_of_items; i++) { 427 p += mwifiex_debug_info_to_buffer(priv, p, &info);
434 p += sprintf(p, "%s=", d[i].name);
435
436 size = d[i].size / d[i].num;
437
438 if (i < (num_of_items - 3))
439 addr = d[i].addr + (size_t) &info;
440 else /* The last 3 items are struct mwifiex_adapter variables */
441 addr = d[i].addr + (size_t) priv->adapter;
442
443 for (j = 0; j < d[i].num; j++) {
444 switch (size) {
445 case 1:
446 val = *((u8 *) addr);
447 break;
448 case 2:
449 val = *((u16 *) addr);
450 break;
451 case 4:
452 val = *((u32 *) addr);
453 break;
454 case 8:
455 val = *((long long *) addr);
456 break;
457 default:
458 val = -1;
459 break;
460 }
461
462 p += sprintf(p, "%#lx ", val);
463 addr += size;
464 }
465
466 p += sprintf(p, "\n");
467 }
468
469 if (info.tx_tbl_num) {
470 p += sprintf(p, "Tx BA stream table:\n");
471 for (i = 0; i < info.tx_tbl_num; i++)
472 p += sprintf(p, "tid = %d, ra = %pM\n",
473 info.tx_tbl[i].tid, info.tx_tbl[i].ra);
474 }
475
476 if (info.rx_tbl_num) {
477 p += sprintf(p, "Rx reorder table:\n");
478 for (i = 0; i < info.rx_tbl_num; i++) {
479 p += sprintf(p, "tid = %d, ta = %pM, "
480 "start_win = %d, "
481 "win_size = %d, buffer: ",
482 info.rx_tbl[i].tid,
483 info.rx_tbl[i].ta,
484 info.rx_tbl[i].start_win,
485 info.rx_tbl[i].win_size);
486
487 for (j = 0; j < info.rx_tbl[i].win_size; j++)
488 p += sprintf(p, "%c ",
489 info.rx_tbl[i].buffer[j] ?
490 '1' : '0');
491
492 p += sprintf(p, "\n");
493 }
494 }
495 428
496 ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page, 429 ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
497 (unsigned long) p - page); 430 (unsigned long) p - page);
@@ -817,6 +750,7 @@ MWIFIEX_DFS_FILE_READ_OPS(fw_dump);
817MWIFIEX_DFS_FILE_OPS(regrdwr); 750MWIFIEX_DFS_FILE_OPS(regrdwr);
818MWIFIEX_DFS_FILE_OPS(rdeeprom); 751MWIFIEX_DFS_FILE_OPS(rdeeprom);
819MWIFIEX_DFS_FILE_OPS(hscfg); 752MWIFIEX_DFS_FILE_OPS(hscfg);
753MWIFIEX_DFS_FILE_OPS(histogram);
820 754
821/* 755/*
822 * This function creates the debug FS directory structure and the files. 756 * This function creates the debug FS directory structure and the files.
@@ -840,6 +774,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
840 MWIFIEX_DFS_ADD_FILE(rdeeprom); 774 MWIFIEX_DFS_ADD_FILE(rdeeprom);
841 MWIFIEX_DFS_ADD_FILE(fw_dump); 775 MWIFIEX_DFS_ADD_FILE(fw_dump);
842 MWIFIEX_DFS_ADD_FILE(hscfg); 776 MWIFIEX_DFS_ADD_FILE(hscfg);
777 MWIFIEX_DFS_ADD_FILE(histogram);
843} 778}
844 779
845/* 780/*
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 2269acf41ad8..88d0eade6bb1 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -32,15 +32,19 @@
32 32
33#define MWIFIEX_MAX_BSS_NUM (3) 33#define MWIFIEX_MAX_BSS_NUM (3)
34 34
35#define MWIFIEX_MIN_DATA_HEADER_LEN 36 /* sizeof(mwifiex_txpd) 35#define MWIFIEX_DMA_ALIGN_SZ 64
36 * + 4 byte alignment 36#define MAX_TXPD_SZ 32
37 */ 37#define INTF_HDR_ALIGN 4
38
39#define MWIFIEX_MIN_DATA_HEADER_LEN (MWIFIEX_DMA_ALIGN_SZ + INTF_HDR_ALIGN + \
40 MAX_TXPD_SZ)
38#define MWIFIEX_MGMT_FRAME_HEADER_SIZE 8 /* sizeof(pkt_type) 41#define MWIFIEX_MGMT_FRAME_HEADER_SIZE 8 /* sizeof(pkt_type)
39 * + sizeof(tx_control) 42 * + sizeof(tx_control)
40 */ 43 */
41 44
42#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2 45#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
43#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16 46#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
47#define MWIFIEX_MAX_TDLS_PEER_SUPPORTED 8
44 48
45#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE 64 49#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE 64
46#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE 64 50#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE 64
@@ -92,6 +96,20 @@
92#define MWIFIEX_TDLS_MAX_FAIL_COUNT 4 96#define MWIFIEX_TDLS_MAX_FAIL_COUNT 4
93#define MWIFIEX_AUTO_TDLS_IDLE_TIME 10 97#define MWIFIEX_AUTO_TDLS_IDLE_TIME 10
94 98
99/* 54M rates, index from 0 to 11 */
100#define MWIFIEX_RATE_INDEX_MCS0 12
101/* 12-27=MCS0-15(BW20) */
102#define MWIFIEX_BW20_MCS_NUM 15
103
104/* Rate index for OFDM 0 */
105#define MWIFIEX_RATE_INDEX_OFDM0 4
106
107#define MWIFIEX_MAX_STA_NUM 1
108#define MWIFIEX_MAX_UAP_NUM 1
109#define MWIFIEX_MAX_P2P_NUM 1
110
111#define MWIFIEX_A_BAND_START_FREQ 5000
112
95enum mwifiex_bss_type { 113enum mwifiex_bss_type {
96 MWIFIEX_BSS_TYPE_STA = 0, 114 MWIFIEX_BSS_TYPE_STA = 0,
97 MWIFIEX_BSS_TYPE_UAP = 1, 115 MWIFIEX_BSS_TYPE_UAP = 1,
@@ -204,4 +222,35 @@ struct mwifiex_chan_stats {
204 u16 cca_scan_dur; 222 u16 cca_scan_dur;
205 u16 cca_busy_dur; 223 u16 cca_busy_dur;
206} __packed; 224} __packed;
225
226#define MWIFIEX_HIST_MAX_SAMPLES 1048576
227#define MWIFIEX_MAX_RX_RATES 44
228#define MWIFIEX_MAX_AC_RX_RATES 74
229#define MWIFIEX_MAX_SNR 256
230#define MWIFIEX_MAX_NOISE_FLR 256
231#define MWIFIEX_MAX_SIG_STRENGTH 256
232
233struct mwifiex_histogram_data {
234 atomic_t rx_rate[MWIFIEX_MAX_AC_RX_RATES];
235 atomic_t snr[MWIFIEX_MAX_SNR];
236 atomic_t noise_flr[MWIFIEX_MAX_NOISE_FLR];
237 atomic_t sig_str[MWIFIEX_MAX_SIG_STRENGTH];
238 atomic_t num_samples;
239};
240
241struct mwifiex_iface_comb {
242 u8 sta_intf;
243 u8 uap_intf;
244 u8 p2p_intf;
245};
246
247struct mwifiex_radar_params {
248 struct cfg80211_chan_def *chandef;
249 u32 cac_time_ms;
250} __packed;
251
252struct mwifiex_11h_intf_state {
253 bool is_11h_enabled;
254 bool is_11h_active;
255} __packed;
207#endif /* !_MWIFIEX_DECL_H_ */ 256#endif /* !_MWIFIEX_DECL_H_ */
diff --git a/drivers/net/wireless/mwifiex/ethtool.c b/drivers/net/wireless/mwifiex/ethtool.c
index 04e56b5fc535..65d8d6d4b6ba 100644
--- a/drivers/net/wireless/mwifiex/ethtool.c
+++ b/drivers/net/wireless/mwifiex/ethtool.c
@@ -76,7 +76,9 @@ mwifiex_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
76 76
77 dump->flag = adapter->curr_mem_idx; 77 dump->flag = adapter->curr_mem_idx;
78 dump->version = 1; 78 dump->version = 1;
79 if (adapter->curr_mem_idx != MWIFIEX_FW_DUMP_IDX) { 79 if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) {
80 dump->len = adapter->drv_info_size;
81 } else if (adapter->curr_mem_idx != MWIFIEX_FW_DUMP_IDX) {
80 entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx]; 82 entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx];
81 dump->len = entry->mem_size; 83 dump->len = entry->mem_size;
82 } else { 84 } else {
@@ -98,6 +100,13 @@ mwifiex_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
98 if (!adapter->if_ops.fw_dump) 100 if (!adapter->if_ops.fw_dump)
99 return -ENOTSUPP; 101 return -ENOTSUPP;
100 102
103 if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) {
104 if (!adapter->drv_info_dump)
105 return -EFAULT;
106 memcpy(p, adapter->drv_info_dump, adapter->drv_info_size);
107 return 0;
108 }
109
101 if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) { 110 if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) {
102 dev_err(adapter->dev, "firmware dump in progress!!\n"); 111 dev_err(adapter->dev, "firmware dump in progress!!\n");
103 return -EBUSY; 112 return -EBUSY;
@@ -125,6 +134,11 @@ static int mwifiex_set_dump(struct net_device *dev, struct ethtool_dump *val)
125 if (!adapter->if_ops.fw_dump) 134 if (!adapter->if_ops.fw_dump)
126 return -ENOTSUPP; 135 return -ENOTSUPP;
127 136
137 if (val->flag == MWIFIEX_DRV_INFO_IDX) {
138 adapter->curr_mem_idx = MWIFIEX_DRV_INFO_IDX;
139 return 0;
140 }
141
128 if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) { 142 if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) {
129 dev_err(adapter->dev, "firmware dump in progress!!\n"); 143 dev_err(adapter->dev, "firmware dump in progress!!\n");
130 return -EBUSY; 144 return -EBUSY;
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index fb5936eb82e3..df553e86a0ad 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -158,6 +158,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
158#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84) 158#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84)
159#define TLV_TYPE_BSS_SCAN_RSP (PROPRIETARY_TLV_BASE_ID + 86) 159#define TLV_TYPE_BSS_SCAN_RSP (PROPRIETARY_TLV_BASE_ID + 86)
160#define TLV_TYPE_BSS_SCAN_INFO (PROPRIETARY_TLV_BASE_ID + 87) 160#define TLV_TYPE_BSS_SCAN_INFO (PROPRIETARY_TLV_BASE_ID + 87)
161#define TLV_TYPE_CHANRPT_11H_BASIC (PROPRIETARY_TLV_BASE_ID + 91)
161#define TLV_TYPE_UAP_RETRY_LIMIT (PROPRIETARY_TLV_BASE_ID + 93) 162#define TLV_TYPE_UAP_RETRY_LIMIT (PROPRIETARY_TLV_BASE_ID + 93)
162#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94) 163#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94)
163#define TLV_TYPE_UAP_MGMT_FRAME (PROPRIETARY_TLV_BASE_ID + 104) 164#define TLV_TYPE_UAP_MGMT_FRAME (PROPRIETARY_TLV_BASE_ID + 104)
@@ -233,6 +234,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
233#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22)) 234#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
234#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30)) 235#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30))
235#define ISALLOWED_CHANWIDTH40(ht_param) (ht_param & BIT(2)) 236#define ISALLOWED_CHANWIDTH40(ht_param) (ht_param & BIT(2))
237#define GETSUPP_TXBASTREAMS(Dot11nDevCap) ((Dot11nDevCap >> 18) & 0xF)
236 238
237/* httxcfg bitmap 239/* httxcfg bitmap
238 * 0 reserved 240 * 0 reserved
@@ -335,6 +337,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
335#define HostCmd_CMD_11N_ADDBA_RSP 0x00cf 337#define HostCmd_CMD_11N_ADDBA_RSP 0x00cf
336#define HostCmd_CMD_11N_DELBA 0x00d0 338#define HostCmd_CMD_11N_DELBA 0x00d0
337#define HostCmd_CMD_RECONFIGURE_TX_BUFF 0x00d9 339#define HostCmd_CMD_RECONFIGURE_TX_BUFF 0x00d9
340#define HostCmd_CMD_CHAN_REPORT_REQUEST 0x00dd
338#define HostCmd_CMD_AMSDU_AGGR_CTRL 0x00df 341#define HostCmd_CMD_AMSDU_AGGR_CTRL 0x00df
339#define HostCmd_CMD_TXPWR_CFG 0x00d1 342#define HostCmd_CMD_TXPWR_CFG 0x00d1
340#define HostCmd_CMD_TX_RATE_CFG 0x00d6 343#define HostCmd_CMD_TX_RATE_CFG 0x00d6
@@ -492,6 +495,8 @@ enum P2P_MODES {
492#define EVENT_HOSTWAKE_STAIE 0x0000004d 495#define EVENT_HOSTWAKE_STAIE 0x0000004d
493#define EVENT_CHANNEL_SWITCH_ANN 0x00000050 496#define EVENT_CHANNEL_SWITCH_ANN 0x00000050
494#define EVENT_TDLS_GENERIC_EVENT 0x00000052 497#define EVENT_TDLS_GENERIC_EVENT 0x00000052
498#define EVENT_RADAR_DETECTED 0x00000053
499#define EVENT_CHANNEL_REPORT_RDY 0x00000054
495#define EVENT_EXT_SCAN_REPORT 0x00000058 500#define EVENT_EXT_SCAN_REPORT 0x00000058
496#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f 501#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
497#define EVENT_TX_STATUS_REPORT 0x00000074 502#define EVENT_TX_STATUS_REPORT 0x00000074
@@ -529,6 +534,8 @@ enum P2P_MODES {
529 534
530#define MWIFIEX_FW_V15 15 535#define MWIFIEX_FW_V15 15
531 536
537#define MWIFIEX_MASTER_RADAR_DET_MASK BIT(1)
538
532struct mwifiex_ie_types_header { 539struct mwifiex_ie_types_header {
533 __le16 type; 540 __le16 type;
534 __le16 len; 541 __le16 len;
@@ -1076,6 +1083,8 @@ struct host_cmd_ds_802_11_get_log {
1076 __le32 tx_frame; 1083 __le32 tx_frame;
1077 __le32 reserved; 1084 __le32 reserved;
1078 __le32 wep_icv_err_cnt[4]; 1085 __le32 wep_icv_err_cnt[4];
1086 __le32 bcn_rcv_cnt;
1087 __le32 bcn_miss_cnt;
1079}; 1088};
1080 1089
1081/* Enumeration for rate format */ 1090/* Enumeration for rate format */
@@ -1213,6 +1222,24 @@ struct host_cmd_ds_tdls_oper {
1213 u8 peer_mac[ETH_ALEN]; 1222 u8 peer_mac[ETH_ALEN];
1214} __packed; 1223} __packed;
1215 1224
1225struct mwifiex_chan_desc {
1226 __le16 start_freq;
1227 u8 chan_width;
1228 u8 chan_num;
1229} __packed;
1230
1231struct host_cmd_ds_chan_rpt_req {
1232 struct mwifiex_chan_desc chan_desc;
1233 __le32 msec_dwell_time;
1234} __packed;
1235
1236struct host_cmd_ds_chan_rpt_event {
1237 __le32 result;
1238 __le64 start_tsf;
1239 __le32 duration;
1240 u8 tlvbuf[0];
1241} __packed;
1242
1216struct mwifiex_fixed_bcn_param { 1243struct mwifiex_fixed_bcn_param {
1217 __le64 timestamp; 1244 __le64 timestamp;
1218 __le16 beacon_period; 1245 __le16 beacon_period;
@@ -1789,6 +1816,39 @@ struct mwifiex_ie_types_rssi_threshold {
1789 u8 evt_freq; 1816 u8 evt_freq;
1790} __packed; 1817} __packed;
1791 1818
1819#define MWIFIEX_DFS_REC_HDR_LEN 8
1820#define MWIFIEX_DFS_REC_HDR_NUM 10
1821#define MWIFIEX_BIN_COUNTER_LEN 7
1822
1823struct mwifiex_radar_det_event {
1824 __le32 detect_count;
1825 u8 reg_domain; /*1=fcc, 2=etsi, 3=mic*/
1826 u8 det_type; /*0=none, 1=pw(chirp), 2=pri(radar)*/
1827 __le16 pw_chirp_type;
1828 u8 pw_chirp_idx;
1829 u8 pw_value;
1830 u8 pri_radar_type;
1831 u8 pri_bincnt;
1832 u8 bin_counter[MWIFIEX_BIN_COUNTER_LEN];
1833 u8 num_dfs_records;
1834 u8 dfs_record_hdr[MWIFIEX_DFS_REC_HDR_NUM][MWIFIEX_DFS_REC_HDR_LEN];
1835 __le32 passed;
1836} __packed;
1837
1838struct meas_rpt_map {
1839 u8 rssi:3;
1840 u8 unmeasured:1;
1841 u8 radar:1;
1842 u8 unidentified_sig:1;
1843 u8 ofdm_preamble:1;
1844 u8 bss:1;
1845} __packed;
1846
1847struct mwifiex_ie_types_chan_rpt_data {
1848 struct mwifiex_ie_types_header header;
1849 struct meas_rpt_map map;
1850} __packed;
1851
1792struct host_cmd_ds_802_11_subsc_evt { 1852struct host_cmd_ds_802_11_subsc_evt {
1793 __le16 action; 1853 __le16 action;
1794 __le16 events; 1854 __le16 events;
@@ -1901,6 +1961,7 @@ struct host_cmd_ds_command {
1901 struct host_cmd_11ac_vht_cfg vht_cfg; 1961 struct host_cmd_11ac_vht_cfg vht_cfg;
1902 struct host_cmd_ds_coalesce_cfg coalesce_cfg; 1962 struct host_cmd_ds_coalesce_cfg coalesce_cfg;
1903 struct host_cmd_ds_tdls_oper tdls_oper; 1963 struct host_cmd_ds_tdls_oper tdls_oper;
1964 struct host_cmd_ds_chan_rpt_req chan_rpt_req;
1904 } params; 1965 } params;
1905} __packed; 1966} __packed;
1906 1967
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index b933794758b7..f3b6ed249403 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -317,27 +317,27 @@ done:
317 return ret; 317 return ret;
318} 318}
319 319
320/* This function parses different IEs-tail IEs, beacon IEs, probe response IEs, 320/* This function parses head and tail IEs, from cfg80211_beacon_data and sets
321 * association response IEs from cfg80211_ap_settings function and sets these IE 321 * these IE to FW.
322 * to FW.
323 */ 322 */
324int mwifiex_set_mgmt_ies(struct mwifiex_private *priv, 323static int mwifiex_uap_set_head_tail_ies(struct mwifiex_private *priv,
325 struct cfg80211_beacon_data *info) 324 struct cfg80211_beacon_data *info)
326{ 325{
327 struct mwifiex_ie *gen_ie; 326 struct mwifiex_ie *gen_ie;
328 struct ieee_types_header *rsn_ie, *wpa_ie = NULL; 327 struct ieee_types_header *rsn_ie = NULL, *wpa_ie = NULL;
329 u16 rsn_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0; 328 struct ieee_types_header *chsw_ie = NULL;
329 u16 gen_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
330 const u8 *vendor_ie; 330 const u8 *vendor_ie;
331 331
332 if (info->tail && info->tail_len) { 332 gen_ie = kzalloc(sizeof(*gen_ie), GFP_KERNEL);
333 gen_ie = kzalloc(sizeof(struct mwifiex_ie), GFP_KERNEL); 333 if (!gen_ie)
334 if (!gen_ie) 334 return -ENOMEM;
335 return -ENOMEM; 335 gen_ie->ie_index = cpu_to_le16(gen_idx);
336 gen_ie->ie_index = cpu_to_le16(rsn_idx); 336 gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
337 gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON | 337 MGMT_MASK_PROBE_RESP |
338 MGMT_MASK_PROBE_RESP | 338 MGMT_MASK_ASSOC_RESP);
339 MGMT_MASK_ASSOC_RESP);
340 339
340 if (info->tail && info->tail_len) {
341 rsn_ie = (void *)cfg80211_find_ie(WLAN_EID_RSN, 341 rsn_ie = (void *)cfg80211_find_ie(WLAN_EID_RSN,
342 info->tail, info->tail_len); 342 info->tail, info->tail_len);
343 if (rsn_ie) { 343 if (rsn_ie) {
@@ -358,19 +358,41 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
358 gen_ie->ie_length = cpu_to_le16(ie_len); 358 gen_ie->ie_length = cpu_to_le16(ie_len);
359 } 359 }
360 360
361 if (rsn_ie || wpa_ie) { 361 chsw_ie = (void *)cfg80211_find_ie(WLAN_EID_CHANNEL_SWITCH,
362 if (mwifiex_update_uap_custom_ie(priv, gen_ie, &rsn_idx, 362 info->tail, info->tail_len);
363 NULL, NULL, 363 if (chsw_ie) {
364 NULL, NULL)) { 364 memcpy(gen_ie->ie_buffer + ie_len,
365 kfree(gen_ie); 365 chsw_ie, chsw_ie->len + 2);
366 return -1; 366 ie_len += chsw_ie->len + 2;
367 } 367 gen_ie->ie_length = cpu_to_le16(ie_len);
368 priv->rsn_idx = rsn_idx;
369 } 368 }
369 }
370 370
371 kfree(gen_ie); 371 if (rsn_ie || wpa_ie || chsw_ie) {
372 if (mwifiex_update_uap_custom_ie(priv, gen_ie, &gen_idx, NULL,
373 NULL, NULL, NULL)) {
374 kfree(gen_ie);
375 return -1;
376 }
377 priv->gen_idx = gen_idx;
372 } 378 }
373 379
380 kfree(gen_ie);
381 return 0;
382}
383
384/* This function parses different IEs-head & tail IEs, beacon IEs,
385 * probe response IEs, association response IEs from cfg80211_ap_settings
386 * function and sets these IE to FW.
387 */
388int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
389 struct cfg80211_beacon_data *info)
390{
391 int ret;
392
393 ret = mwifiex_uap_set_head_tail_ies(priv, info);
394 return ret;
395
374 return mwifiex_set_mgmt_beacon_data_ies(priv, info); 396 return mwifiex_set_mgmt_beacon_data_ies(priv, info);
375} 397}
376 398
@@ -378,25 +400,25 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
378int mwifiex_del_mgmt_ies(struct mwifiex_private *priv) 400int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
379{ 401{
380 struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL; 402 struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL;
381 struct mwifiex_ie *ar_ie = NULL, *rsn_ie = NULL; 403 struct mwifiex_ie *ar_ie = NULL, *gen_ie = NULL;
382 int ret = 0; 404 int ret = 0;
383 405
384 if (priv->rsn_idx != MWIFIEX_AUTO_IDX_MASK) { 406 if (priv->gen_idx != MWIFIEX_AUTO_IDX_MASK) {
385 rsn_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL); 407 gen_ie = kmalloc(sizeof(*gen_ie), GFP_KERNEL);
386 if (!rsn_ie) 408 if (!gen_ie)
387 return -ENOMEM; 409 return -ENOMEM;
388 410
389 rsn_ie->ie_index = cpu_to_le16(priv->rsn_idx); 411 gen_ie->ie_index = cpu_to_le16(priv->gen_idx);
390 rsn_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK); 412 gen_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
391 rsn_ie->ie_length = 0; 413 gen_ie->ie_length = 0;
392 if (mwifiex_update_uap_custom_ie(priv, rsn_ie, &priv->rsn_idx, 414 if (mwifiex_update_uap_custom_ie(priv, gen_ie, &priv->gen_idx,
393 NULL, &priv->proberesp_idx, 415 NULL, &priv->proberesp_idx,
394 NULL, &priv->assocresp_idx)) { 416 NULL, &priv->assocresp_idx)) {
395 ret = -1; 417 ret = -1;
396 goto done; 418 goto done;
397 } 419 }
398 420
399 priv->rsn_idx = MWIFIEX_AUTO_IDX_MASK; 421 priv->gen_idx = MWIFIEX_AUTO_IDX_MASK;
400 } 422 }
401 423
402 if (priv->beacon_idx != MWIFIEX_AUTO_IDX_MASK) { 424 if (priv->beacon_idx != MWIFIEX_AUTO_IDX_MASK) {
@@ -440,7 +462,6 @@ done:
440 kfree(beacon_ie); 462 kfree(beacon_ie);
441 kfree(pr_ie); 463 kfree(pr_ie);
442 kfree(ar_ie); 464 kfree(ar_ie);
443 kfree(rsn_ie);
444 465
445 return ret; 466 return ret;
446} 467}
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 520ad4a3018b..b77ba743e1c4 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -52,6 +52,18 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
52 return 0; 52 return 0;
53} 53}
54 54
55static void wakeup_timer_fn(unsigned long data)
56{
57 struct mwifiex_adapter *adapter = (struct mwifiex_adapter *)data;
58
59 dev_err(adapter->dev, "Firmware wakeup failed\n");
60 adapter->hw_status = MWIFIEX_HW_STATUS_RESET;
61 mwifiex_cancel_all_pending_cmd(adapter);
62
63 if (adapter->if_ops.card_reset)
64 adapter->if_ops.card_reset(adapter);
65}
66
55/* 67/*
56 * This function initializes the private structure and sets default 68 * This function initializes the private structure and sets default
57 * values to the members. 69 * values to the members.
@@ -140,6 +152,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
140 priv->check_tdls_tx = false; 152 priv->check_tdls_tx = false;
141 memcpy(priv->tos_to_tid_inv, tos_to_tid_inv, MAX_NUM_TID); 153 memcpy(priv->tos_to_tid_inv, tos_to_tid_inv, MAX_NUM_TID);
142 154
155 mwifiex_init_11h_params(priv);
156
143 return mwifiex_add_bss_prio_tbl(priv); 157 return mwifiex_add_bss_prio_tbl(priv);
144} 158}
145 159
@@ -282,9 +296,16 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
282 memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter)); 296 memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
283 adapter->arp_filter_size = 0; 297 adapter->arp_filter_size = 0;
284 adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX; 298 adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
285 adapter->ext_scan = true; 299 adapter->ext_scan = false;
286 adapter->key_api_major_ver = 0; 300 adapter->key_api_major_ver = 0;
287 adapter->key_api_minor_ver = 0; 301 adapter->key_api_minor_ver = 0;
302 memset(adapter->perm_addr, 0xff, ETH_ALEN);
303 adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM;
304 adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM;
305 adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
306
307 setup_timer(&adapter->wakeup_timer, wakeup_timer_fn,
308 (unsigned long)adapter);
288} 309}
289 310
290/* 311/*
@@ -391,7 +412,10 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
391 return; 412 return;
392 } 413 }
393 414
415 del_timer(&adapter->wakeup_timer);
394 mwifiex_cancel_all_pending_cmd(adapter); 416 mwifiex_cancel_all_pending_cmd(adapter);
417 wake_up_interruptible(&adapter->cmd_wait_q.wait);
418 wake_up_interruptible(&adapter->hs_activate_wait_q);
395 419
396 /* Free lock variables */ 420 /* Free lock variables */
397 mwifiex_free_lock_list(adapter); 421 mwifiex_free_lock_list(adapter);
@@ -411,6 +435,11 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
411 entry->mem_size = 0; 435 entry->mem_size = 0;
412 } 436 }
413 437
438 if (adapter->drv_info_dump) {
439 vfree(adapter->drv_info_dump);
440 adapter->drv_info_size = 0;
441 }
442
414 if (adapter->sleep_cfm) 443 if (adapter->sleep_cfm)
415 dev_kfree_skb_any(adapter->sleep_cfm); 444 dev_kfree_skb_any(adapter->sleep_cfm);
416} 445}
@@ -528,7 +557,8 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
528 557
529 for (i = 0; i < adapter->priv_num; i++) { 558 for (i = 0; i < adapter->priv_num; i++) {
530 if (adapter->priv[i]) { 559 if (adapter->priv[i]) {
531 ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta); 560 ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta,
561 true);
532 if (ret == -1) 562 if (ret == -1)
533 return -1; 563 return -1;
534 564
@@ -653,6 +683,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
653 priv = adapter->priv[i]; 683 priv = adapter->priv[i];
654 684
655 mwifiex_clean_auto_tdls(priv); 685 mwifiex_clean_auto_tdls(priv);
686 mwifiex_abort_cac(priv);
656 mwifiex_clean_txrx(priv); 687 mwifiex_clean_txrx(priv);
657 mwifiex_delete_bss_prio_tbl(priv); 688 mwifiex_delete_bss_prio_tbl(priv);
658 } 689 }
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 0847f3e07ab7..d2b05c3a96da 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -137,6 +137,8 @@ struct mwifiex_ds_get_stats {
137 u32 fcs_error; 137 u32 fcs_error;
138 u32 tx_frame; 138 u32 tx_frame;
139 u32 wep_icv_error[4]; 139 u32 wep_icv_error[4];
140 u32 bcn_rcv_cnt;
141 u32 bcn_miss_cnt;
140}; 142};
141 143
142#define MWIFIEX_MAX_VER_STR_LEN 128 144#define MWIFIEX_MAX_VER_STR_LEN 128
@@ -180,7 +182,11 @@ struct mwifiex_ds_tx_ba_stream_tbl {
180 u8 amsdu; 182 u8 amsdu;
181}; 183};
182 184
183#define DBG_CMD_NUM 5 185#define DBG_CMD_NUM 5
186
187struct tdls_peer_info {
188 u8 peer_addr[ETH_ALEN];
189};
184 190
185struct mwifiex_debug_info { 191struct mwifiex_debug_info {
186 u32 int_counter; 192 u32 int_counter;
@@ -193,6 +199,9 @@ struct mwifiex_debug_info {
193 u32 rx_tbl_num; 199 u32 rx_tbl_num;
194 struct mwifiex_ds_rx_reorder_tbl rx_tbl 200 struct mwifiex_ds_rx_reorder_tbl rx_tbl
195 [MWIFIEX_MAX_RX_BASTREAM_SUPPORTED]; 201 [MWIFIEX_MAX_RX_BASTREAM_SUPPORTED];
202 u32 tdls_peer_num;
203 struct tdls_peer_info tdls_list
204 [MWIFIEX_MAX_TDLS_PEER_SUPPORTED];
196 u16 ps_mode; 205 u16 ps_mode;
197 u32 ps_state; 206 u32 ps_state;
198 u8 is_deep_sleep; 207 u8 is_deep_sleep;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index d4d2223d1f31..7e74b4fccddd 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -83,9 +83,8 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
83 } 83 }
84 mwifiex_init_lock_list(adapter); 84 mwifiex_init_lock_list(adapter);
85 85
86 init_timer(&adapter->cmd_timer); 86 setup_timer(&adapter->cmd_timer, mwifiex_cmd_timeout_func,
87 adapter->cmd_timer.function = mwifiex_cmd_timeout_func; 87 (unsigned long)adapter);
88 adapter->cmd_timer.data = (unsigned long) adapter;
89 88
90 return 0; 89 return 0;
91 90
@@ -237,6 +236,7 @@ process_start:
237 (is_command_pending(adapter) || 236 (is_command_pending(adapter) ||
238 !mwifiex_wmm_lists_empty(adapter))) { 237 !mwifiex_wmm_lists_empty(adapter))) {
239 adapter->pm_wakeup_fw_try = true; 238 adapter->pm_wakeup_fw_try = true;
239 mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
240 adapter->if_ops.wakeup(adapter); 240 adapter->if_ops.wakeup(adapter);
241 continue; 241 continue;
242 } 242 }
@@ -244,6 +244,7 @@ process_start:
244 if (IS_CARD_RX_RCVD(adapter)) { 244 if (IS_CARD_RX_RCVD(adapter)) {
245 adapter->data_received = false; 245 adapter->data_received = false;
246 adapter->pm_wakeup_fw_try = false; 246 adapter->pm_wakeup_fw_try = false;
247 del_timer_sync(&adapter->wakeup_timer);
247 if (adapter->ps_state == PS_STATE_SLEEP) 248 if (adapter->ps_state == PS_STATE_SLEEP)
248 adapter->ps_state = PS_STATE_AWAKE; 249 adapter->ps_state = PS_STATE_AWAKE;
249 } else { 250 } else {
@@ -511,8 +512,7 @@ err_dnld_fw:
511 if (adapter->if_ops.unregister_dev) 512 if (adapter->if_ops.unregister_dev)
512 adapter->if_ops.unregister_dev(adapter); 513 adapter->if_ops.unregister_dev(adapter);
513 514
514 if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) || 515 if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
515 (adapter->hw_status == MWIFIEX_HW_STATUS_READY)) {
516 pr_debug("info: %s: shutdown mwifiex\n", __func__); 516 pr_debug("info: %s: shutdown mwifiex\n", __func__);
517 adapter->init_wait_q_woken = false; 517 adapter->init_wait_q_woken = false;
518 518
@@ -562,7 +562,8 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
562static int 562static int
563mwifiex_open(struct net_device *dev) 563mwifiex_open(struct net_device *dev)
564{ 564{
565 netif_tx_start_all_queues(dev); 565 netif_carrier_off(dev);
566
566 return 0; 567 return 0;
567} 568}
568 569
@@ -801,6 +802,114 @@ mwifiex_tx_timeout(struct net_device *dev)
801 } 802 }
802} 803}
803 804
805void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
806{
807 void *p;
808 char drv_version[64];
809 struct usb_card_rec *cardp;
810 struct sdio_mmc_card *sdio_card;
811 struct mwifiex_private *priv;
812 int i, idx;
813 struct netdev_queue *txq;
814 struct mwifiex_debug_info *debug_info;
815
816 if (adapter->drv_info_dump) {
817 vfree(adapter->drv_info_dump);
818 adapter->drv_info_size = 0;
819 }
820
821 dev_info(adapter->dev, "=== DRIVER INFO DUMP START===\n");
822
823 adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
824
825 if (!adapter->drv_info_dump)
826 return;
827
828 p = (char *)(adapter->drv_info_dump);
829 p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
830
831 mwifiex_drv_get_driver_version(adapter, drv_version,
832 sizeof(drv_version) - 1);
833 p += sprintf(p, "driver_version = %s\n", drv_version);
834
835 if (adapter->iface_type == MWIFIEX_USB) {
836 cardp = (struct usb_card_rec *)adapter->card;
837 p += sprintf(p, "tx_cmd_urb_pending = %d\n",
838 atomic_read(&cardp->tx_cmd_urb_pending));
839 p += sprintf(p, "tx_data_urb_pending = %d\n",
840 atomic_read(&cardp->tx_data_urb_pending));
841 p += sprintf(p, "rx_cmd_urb_pending = %d\n",
842 atomic_read(&cardp->rx_cmd_urb_pending));
843 p += sprintf(p, "rx_data_urb_pending = %d\n",
844 atomic_read(&cardp->rx_data_urb_pending));
845 }
846
847 p += sprintf(p, "tx_pending = %d\n",
848 atomic_read(&adapter->tx_pending));
849 p += sprintf(p, "rx_pending = %d\n",
850 atomic_read(&adapter->rx_pending));
851
852 if (adapter->iface_type == MWIFIEX_SDIO) {
853 sdio_card = (struct sdio_mmc_card *)adapter->card;
854 p += sprintf(p, "\nmp_rd_bitmap=0x%x curr_rd_port=0x%x\n",
855 sdio_card->mp_rd_bitmap, sdio_card->curr_rd_port);
856 p += sprintf(p, "mp_wr_bitmap=0x%x curr_wr_port=0x%x\n",
857 sdio_card->mp_wr_bitmap, sdio_card->curr_wr_port);
858 }
859
860 for (i = 0; i < adapter->priv_num; i++) {
861 if (!adapter->priv[i] || !adapter->priv[i]->netdev)
862 continue;
863 priv = adapter->priv[i];
864 p += sprintf(p, "\n[interface : \"%s\"]\n",
865 priv->netdev->name);
866 p += sprintf(p, "wmm_tx_pending[0] = %d\n",
867 atomic_read(&priv->wmm_tx_pending[0]));
868 p += sprintf(p, "wmm_tx_pending[1] = %d\n",
869 atomic_read(&priv->wmm_tx_pending[1]));
870 p += sprintf(p, "wmm_tx_pending[2] = %d\n",
871 atomic_read(&priv->wmm_tx_pending[2]));
872 p += sprintf(p, "wmm_tx_pending[3] = %d\n",
873 atomic_read(&priv->wmm_tx_pending[3]));
874 p += sprintf(p, "media_state=\"%s\"\n", !priv->media_connected ?
875 "Disconnected" : "Connected");
876 p += sprintf(p, "carrier %s\n", (netif_carrier_ok(priv->netdev)
877 ? "on" : "off"));
878 for (idx = 0; idx < priv->netdev->num_tx_queues; idx++) {
879 txq = netdev_get_tx_queue(priv->netdev, idx);
880 p += sprintf(p, "tx queue %d:%s ", idx,
881 netif_tx_queue_stopped(txq) ?
882 "stopped" : "started");
883 }
884 p += sprintf(p, "\n%s: num_tx_timeout = %d\n",
885 priv->netdev->name, priv->num_tx_timeout);
886 }
887
888 if (adapter->iface_type == MWIFIEX_SDIO) {
889 p += sprintf(p, "\n=== SDIO register DUMP===\n");
890 if (adapter->if_ops.reg_dump)
891 p += adapter->if_ops.reg_dump(adapter, p);
892 }
893
894 p += sprintf(p, "\n=== MORE DEBUG INFORMATION\n");
895 debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
896 if (debug_info) {
897 for (i = 0; i < adapter->priv_num; i++) {
898 if (!adapter->priv[i] || !adapter->priv[i]->netdev)
899 continue;
900 priv = adapter->priv[i];
901 mwifiex_get_debug_info(priv, debug_info);
902 p += mwifiex_debug_info_to_buffer(priv, p, debug_info);
903 break;
904 }
905 kfree(debug_info);
906 }
907
908 adapter->drv_info_size = p - adapter->drv_info_dump;
909 dev_info(adapter->dev, "=== DRIVER INFO DUMP END===\n");
910}
911EXPORT_SYMBOL_GPL(mwifiex_dump_drv_info);
912
804/* 913/*
805 * CFG802.11 network device handler for statistics retrieval. 914 * CFG802.11 network device handler for statistics retrieval.
806 */ 915 */
@@ -847,26 +956,34 @@ static const struct net_device_ops mwifiex_netdev_ops = {
847 * - Nick name : Set to null 956 * - Nick name : Set to null
848 * - Number of Tx timeout : Set to 0 957 * - Number of Tx timeout : Set to 0
849 * - Device address : Set to current address 958 * - Device address : Set to current address
959 * - Rx histogram statistc : Set to 0
850 * 960 *
851 * In addition, the CFG80211 work queue is also created. 961 * In addition, the CFG80211 work queue is also created.
852 */ 962 */
853void mwifiex_init_priv_params(struct mwifiex_private *priv, 963void mwifiex_init_priv_params(struct mwifiex_private *priv,
854 struct net_device *dev) 964 struct net_device *dev)
855{ 965{
856 dev->netdev_ops = &mwifiex_netdev_ops; 966 dev->netdev_ops = &mwifiex_netdev_ops;
857 dev->destructor = free_netdev; 967 dev->destructor = free_netdev;
858 /* Initialize private structure */ 968 /* Initialize private structure */
859 priv->current_key_index = 0; 969 priv->current_key_index = 0;
860 priv->media_connected = false; 970 priv->media_connected = false;
861 memset(&priv->nick_name, 0, sizeof(priv->nick_name));
862 memset(priv->mgmt_ie, 0, 971 memset(priv->mgmt_ie, 0,
863 sizeof(struct mwifiex_ie) * MAX_MGMT_IE_INDEX); 972 sizeof(struct mwifiex_ie) * MAX_MGMT_IE_INDEX);
864 priv->beacon_idx = MWIFIEX_AUTO_IDX_MASK; 973 priv->beacon_idx = MWIFIEX_AUTO_IDX_MASK;
865 priv->proberesp_idx = MWIFIEX_AUTO_IDX_MASK; 974 priv->proberesp_idx = MWIFIEX_AUTO_IDX_MASK;
866 priv->assocresp_idx = MWIFIEX_AUTO_IDX_MASK; 975 priv->assocresp_idx = MWIFIEX_AUTO_IDX_MASK;
867 priv->rsn_idx = MWIFIEX_AUTO_IDX_MASK; 976 priv->gen_idx = MWIFIEX_AUTO_IDX_MASK;
868 priv->num_tx_timeout = 0; 977 priv->num_tx_timeout = 0;
978 ether_addr_copy(priv->curr_addr, priv->adapter->perm_addr);
869 memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN); 979 memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
980
981 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
982 GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
983 priv->hist_data = kmalloc(sizeof(*priv->hist_data), GFP_KERNEL);
984 if (priv->hist_data)
985 mwifiex_hist_data_reset(priv);
986 }
870} 987}
871 988
872/* 989/*
@@ -1000,8 +1117,7 @@ err_init_fw:
1000 pr_debug("info: %s: unregister device\n", __func__); 1117 pr_debug("info: %s: unregister device\n", __func__);
1001 if (adapter->if_ops.unregister_dev) 1118 if (adapter->if_ops.unregister_dev)
1002 adapter->if_ops.unregister_dev(adapter); 1119 adapter->if_ops.unregister_dev(adapter);
1003 if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) || 1120 if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
1004 (adapter->hw_status == MWIFIEX_HW_STATUS_READY)) {
1005 pr_debug("info: %s: shutdown mwifiex\n", __func__); 1121 pr_debug("info: %s: shutdown mwifiex\n", __func__);
1006 adapter->init_wait_q_woken = false; 1122 adapter->init_wait_q_woken = false;
1007 1123
@@ -1052,6 +1168,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
1052 1168
1053 adapter->surprise_removed = true; 1169 adapter->surprise_removed = true;
1054 1170
1171 mwifiex_terminate_workqueue(adapter);
1172
1055 /* Stop data */ 1173 /* Stop data */
1056 for (i = 0; i < adapter->priv_num; i++) { 1174 for (i = 0; i < adapter->priv_num; i++) {
1057 priv = adapter->priv[i]; 1175 priv = adapter->priv[i];
@@ -1086,16 +1204,15 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
1086 continue; 1204 continue;
1087 1205
1088 rtnl_lock(); 1206 rtnl_lock();
1089 if (priv->wdev && priv->netdev) 1207 if (priv->netdev &&
1090 mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev); 1208 priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
1209 mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
1091 rtnl_unlock(); 1210 rtnl_unlock();
1092 } 1211 }
1093 1212
1094 wiphy_unregister(adapter->wiphy); 1213 wiphy_unregister(adapter->wiphy);
1095 wiphy_free(adapter->wiphy); 1214 wiphy_free(adapter->wiphy);
1096 1215
1097 mwifiex_terminate_workqueue(adapter);
1098
1099 /* Unregister device */ 1216 /* Unregister device */
1100 dev_dbg(adapter->dev, "info: unregister device\n"); 1217 dev_dbg(adapter->dev, "info: unregister device\n");
1101 if (adapter->if_ops.unregister_dev) 1218 if (adapter->if_ops.unregister_dev)
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index e66993cb5daf..f0a6af179af0 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -41,6 +41,8 @@
41#include "util.h" 41#include "util.h"
42#include "fw.h" 42#include "fw.h"
43#include "pcie.h" 43#include "pcie.h"
44#include "usb.h"
45#include "sdio.h"
44 46
45extern const char driver_version[]; 47extern const char driver_version[];
46 48
@@ -136,6 +138,8 @@ enum {
136/* Threshold for tx_timeout_cnt before we trigger a card reset */ 138/* Threshold for tx_timeout_cnt before we trigger a card reset */
137#define TX_TIMEOUT_THRESHOLD 6 139#define TX_TIMEOUT_THRESHOLD 6
138 140
141#define MWIFIEX_DRV_INFO_SIZE_MAX 0x40000
142
139struct mwifiex_dbg { 143struct mwifiex_dbg {
140 u32 num_cmd_host_to_card_failure; 144 u32 num_cmd_host_to_card_failure;
141 u32 num_cmd_sleep_cfm_host_to_card_failure; 145 u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -161,7 +165,6 @@ struct mwifiex_dbg {
161enum MWIFIEX_HARDWARE_STATUS { 165enum MWIFIEX_HARDWARE_STATUS {
162 MWIFIEX_HW_STATUS_READY, 166 MWIFIEX_HW_STATUS_READY,
163 MWIFIEX_HW_STATUS_INITIALIZING, 167 MWIFIEX_HW_STATUS_INITIALIZING,
164 MWIFIEX_HW_STATUS_FW_READY,
165 MWIFIEX_HW_STATUS_INIT_DONE, 168 MWIFIEX_HW_STATUS_INIT_DONE,
166 MWIFIEX_HW_STATUS_RESET, 169 MWIFIEX_HW_STATUS_RESET,
167 MWIFIEX_HW_STATUS_CLOSING, 170 MWIFIEX_HW_STATUS_CLOSING,
@@ -413,6 +416,7 @@ struct mwifiex_roc_cfg {
413}; 416};
414 417
415#define MWIFIEX_FW_DUMP_IDX 0xff 418#define MWIFIEX_FW_DUMP_IDX 0xff
419#define MWIFIEX_DRV_INFO_IDX 20
416#define FW_DUMP_MAX_NAME_LEN 8 420#define FW_DUMP_MAX_NAME_LEN 8
417#define FW_DUMP_HOST_READY 0xEE 421#define FW_DUMP_HOST_READY 0xEE
418#define FW_DUMP_DONE 0xFF 422#define FW_DUMP_DONE 0xFF
@@ -543,13 +547,12 @@ struct mwifiex_private {
543 u32 curr_bcn_size; 547 u32 curr_bcn_size;
544 /* spin lock for beacon buffer */ 548 /* spin lock for beacon buffer */
545 spinlock_t curr_bcn_buf_lock; 549 spinlock_t curr_bcn_buf_lock;
546 struct wireless_dev *wdev; 550 struct wireless_dev wdev;
547 struct mwifiex_chan_freq_power cfp; 551 struct mwifiex_chan_freq_power cfp;
548 char version_str[128]; 552 char version_str[128];
549#ifdef CONFIG_DEBUG_FS 553#ifdef CONFIG_DEBUG_FS
550 struct dentry *dfs_dev_dir; 554 struct dentry *dfs_dev_dir;
551#endif 555#endif
552 u8 nick_name[16];
553 u16 current_key_index; 556 u16 current_key_index;
554 struct semaphore async_sem; 557 struct semaphore async_sem;
555 struct cfg80211_scan_request *scan_request; 558 struct cfg80211_scan_request *scan_request;
@@ -564,7 +567,7 @@ struct mwifiex_private {
564 u16 beacon_idx; 567 u16 beacon_idx;
565 u16 proberesp_idx; 568 u16 proberesp_idx;
566 u16 assocresp_idx; 569 u16 assocresp_idx;
567 u16 rsn_idx; 570 u16 gen_idx;
568 u8 ap_11n_enabled; 571 u8 ap_11n_enabled;
569 u8 ap_11ac_enabled; 572 u8 ap_11ac_enabled;
570 u32 mgmt_frame_mask; 573 u32 mgmt_frame_mask;
@@ -574,6 +577,7 @@ struct mwifiex_private {
574 unsigned long csa_expire_time; 577 unsigned long csa_expire_time;
575 u8 del_list_idx; 578 u8 del_list_idx;
576 bool hs2_enabled; 579 bool hs2_enabled;
580 struct mwifiex_uap_bss_param bss_cfg;
577 struct station_parameters *sta_params; 581 struct station_parameters *sta_params;
578 struct sk_buff_head tdls_txq; 582 struct sk_buff_head tdls_txq;
579 u8 check_tdls_tx; 583 u8 check_tdls_tx;
@@ -582,6 +586,16 @@ struct mwifiex_private {
582 struct idr ack_status_frames; 586 struct idr ack_status_frames;
583 /* spin lock for ack status */ 587 /* spin lock for ack status */
584 spinlock_t ack_status_lock; 588 spinlock_t ack_status_lock;
589 /** rx histogram data */
590 struct mwifiex_histogram_data *hist_data;
591 struct cfg80211_chan_def dfs_chandef;
592 struct workqueue_struct *dfs_cac_workqueue;
593 struct delayed_work dfs_cac_work;
594 struct timer_list dfs_chan_switch_timer;
595 struct workqueue_struct *dfs_chan_sw_workqueue;
596 struct delayed_work dfs_chan_sw_work;
597 struct cfg80211_beacon_data beacon_after;
598 struct mwifiex_11h_intf_state state_11h;
585}; 599};
586 600
587enum mwifiex_ba_status { 601enum mwifiex_ba_status {
@@ -717,6 +731,7 @@ struct mwifiex_if_ops {
717 int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); 731 int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
718 void (*card_reset) (struct mwifiex_adapter *); 732 void (*card_reset) (struct mwifiex_adapter *);
719 void (*fw_dump)(struct mwifiex_adapter *); 733 void (*fw_dump)(struct mwifiex_adapter *);
734 int (*reg_dump)(struct mwifiex_adapter *, char *);
720 int (*clean_pcie_ring) (struct mwifiex_adapter *adapter); 735 int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
721 void (*iface_work)(struct work_struct *work); 736 void (*iface_work)(struct work_struct *work);
722 void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter); 737 void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter);
@@ -724,6 +739,8 @@ struct mwifiex_if_ops {
724 739
725struct mwifiex_adapter { 740struct mwifiex_adapter {
726 u8 iface_type; 741 u8 iface_type;
742 struct mwifiex_iface_comb iface_limit;
743 struct mwifiex_iface_comb curr_iface_comb;
727 struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM]; 744 struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
728 u8 priv_num; 745 u8 priv_num;
729 const struct firmware *firmware; 746 const struct firmware *firmware;
@@ -731,6 +748,7 @@ struct mwifiex_adapter {
731 int winner; 748 int winner;
732 struct device *dev; 749 struct device *dev;
733 struct wiphy *wiphy; 750 struct wiphy *wiphy;
751 u8 perm_addr[ETH_ALEN];
734 bool surprise_removed; 752 bool surprise_removed;
735 u32 fw_release_number; 753 u32 fw_release_number;
736 u16 init_wait_q_woken; 754 u16 init_wait_q_woken;
@@ -744,6 +762,8 @@ struct mwifiex_adapter {
744 struct work_struct main_work; 762 struct work_struct main_work;
745 struct workqueue_struct *rx_workqueue; 763 struct workqueue_struct *rx_workqueue;
746 struct work_struct rx_work; 764 struct work_struct rx_work;
765 struct workqueue_struct *dfs_workqueue;
766 struct work_struct dfs_work;
747 bool rx_work_enabled; 767 bool rx_work_enabled;
748 bool rx_processing; 768 bool rx_processing;
749 bool delay_main_work; 769 bool delay_main_work;
@@ -823,6 +843,7 @@ struct mwifiex_adapter {
823 u16 gen_null_pkt; 843 u16 gen_null_pkt;
824 u16 pps_uapsd_mode; 844 u16 pps_uapsd_mode;
825 u32 pm_wakeup_fw_try; 845 u32 pm_wakeup_fw_try;
846 struct timer_list wakeup_timer;
826 u8 is_hs_configured; 847 u8 is_hs_configured;
827 struct mwifiex_hs_config_param hs_cfg; 848 struct mwifiex_hs_config_param hs_cfg;
828 u8 hs_activated; 849 u8 hs_activated;
@@ -865,6 +886,8 @@ struct mwifiex_adapter {
865 struct memory_type_mapping *mem_type_mapping_tbl; 886 struct memory_type_mapping *mem_type_mapping_tbl;
866 u8 num_mem_types; 887 u8 num_mem_types;
867 u8 curr_mem_idx; 888 u8 curr_mem_idx;
889 void *drv_info_dump;
890 u32 drv_info_size;
868 bool scan_chan_gap_enabled; 891 bool scan_chan_gap_enabled;
869 struct sk_buff_head rx_data_q; 892 struct sk_buff_head rx_data_q;
870 struct mwifiex_chan_stats *chan_stats; 893 struct mwifiex_chan_stats *chan_stats;
@@ -979,7 +1002,7 @@ void mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv,
979 const u8 *ra_addr); 1002 const u8 *ra_addr);
980void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb); 1003void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
981void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb); 1004void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
982int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta); 1005int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta, bool init);
983int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd, 1006int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
984 struct mwifiex_scan_cmd_config *scan_cfg); 1007 struct mwifiex_scan_cmd_config *scan_cfg);
985void mwifiex_queue_scan_cmd(struct mwifiex_private *priv, 1008void mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
@@ -1140,6 +1163,25 @@ mwifiex_get_priv(struct mwifiex_adapter *adapter,
1140} 1163}
1141 1164
1142/* 1165/*
1166 * This function returns the first available unused private structure pointer.
1167 */
1168static inline struct mwifiex_private *
1169mwifiex_get_unused_priv(struct mwifiex_adapter *adapter)
1170{
1171 int i;
1172
1173 for (i = 0; i < adapter->priv_num; i++) {
1174 if (adapter->priv[i]) {
1175 if (adapter->priv[i]->bss_mode ==
1176 NL80211_IFTYPE_UNSPECIFIED)
1177 break;
1178 }
1179 }
1180
1181 return ((i < adapter->priv_num) ? adapter->priv[i] : NULL);
1182}
1183
1184/*
1143 * This function returns the driver private structure of a network device. 1185 * This function returns the driver private structure of a network device.
1144 */ 1186 */
1145static inline struct mwifiex_private * 1187static inline struct mwifiex_private *
@@ -1230,8 +1272,6 @@ int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
1230 struct ieee80211_channel *chan, 1272 struct ieee80211_channel *chan,
1231 unsigned int duration); 1273 unsigned int duration);
1232 1274
1233int mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role);
1234
1235int mwifiex_get_stats_info(struct mwifiex_private *priv, 1275int mwifiex_get_stats_info(struct mwifiex_private *priv,
1236 struct mwifiex_ds_get_stats *log); 1276 struct mwifiex_ds_get_stats *log);
1237 1277
@@ -1291,9 +1331,17 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
1291 struct cfg80211_beacon_data *data); 1331 struct cfg80211_beacon_data *data);
1292int mwifiex_del_mgmt_ies(struct mwifiex_private *priv); 1332int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
1293u8 *mwifiex_11d_code_2_region(u8 code); 1333u8 *mwifiex_11d_code_2_region(u8 code);
1334void mwifiex_uap_set_channel(struct mwifiex_uap_bss_param *bss_cfg,
1335 struct cfg80211_chan_def chandef);
1336int mwifiex_config_start_uap(struct mwifiex_private *priv,
1337 struct mwifiex_uap_bss_param *bss_cfg);
1294void mwifiex_uap_del_sta_data(struct mwifiex_private *priv, 1338void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
1295 struct mwifiex_sta_node *node); 1339 struct mwifiex_sta_node *node);
1296 1340
1341void mwifiex_init_11h_params(struct mwifiex_private *priv);
1342int mwifiex_is_11h_active(struct mwifiex_private *priv);
1343int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag);
1344
1297void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer, 1345void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
1298 struct mwifiex_bssdescriptor *bss_desc); 1346 struct mwifiex_bssdescriptor *bss_desc);
1299int mwifiex_11h_handle_event_chanswann(struct mwifiex_private *priv); 1347int mwifiex_11h_handle_event_chanswann(struct mwifiex_private *priv);
@@ -1324,6 +1372,8 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
1324 u8 *buf, int len); 1372 u8 *buf, int len);
1325int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action); 1373int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action);
1326int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac); 1374int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac);
1375int mwifiex_get_tdls_list(struct mwifiex_private *priv,
1376 struct tdls_peer_info *buf);
1327void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv); 1377void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv);
1328bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv); 1378bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv);
1329u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band, 1379u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
@@ -1340,6 +1390,11 @@ void mwifiex_check_auto_tdls(unsigned long context);
1340void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac); 1390void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac);
1341void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv); 1391void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv);
1342void mwifiex_clean_auto_tdls(struct mwifiex_private *priv); 1392void mwifiex_clean_auto_tdls(struct mwifiex_private *priv);
1393int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
1394 struct host_cmd_ds_command *cmd,
1395 void *data_buf);
1396int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
1397 struct sk_buff *skb);
1343 1398
1344void mwifiex_parse_tx_status_event(struct mwifiex_private *priv, 1399void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
1345 void *event_body); 1400 void *event_body);
@@ -1347,6 +1402,21 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
1347struct sk_buff * 1402struct sk_buff *
1348mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv, 1403mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv,
1349 struct sk_buff *skb, u8 flag, u64 *cookie); 1404 struct sk_buff *skb, u8 flag, u64 *cookie);
1405void mwifiex_dfs_cac_work_queue(struct work_struct *work);
1406void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work);
1407void mwifiex_abort_cac(struct mwifiex_private *priv);
1408int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
1409 struct sk_buff *skb);
1410
1411void mwifiex_hist_data_set(struct mwifiex_private *priv, u8 rx_rate, s8 snr,
1412 s8 nflr);
1413void mwifiex_hist_data_reset(struct mwifiex_private *priv);
1414void mwifiex_hist_data_add(struct mwifiex_private *priv,
1415 u8 rx_rate, s8 snr, s8 nflr);
1416u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
1417 u8 rx_rate, u8 ht_info);
1418
1419void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter);
1350 1420
1351#ifdef CONFIG_DEBUG_FS 1421#ifdef CONFIG_DEBUG_FS
1352void mwifiex_debugfs_init(void); 1422void mwifiex_debugfs_init(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index c3a20f94f3c9..a5828da59365 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -204,6 +204,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
204 card->pcie.blksz_fw_dl = data->blksz_fw_dl; 204 card->pcie.blksz_fw_dl = data->blksz_fw_dl;
205 card->pcie.tx_buf_size = data->tx_buf_size; 205 card->pcie.tx_buf_size = data->tx_buf_size;
206 card->pcie.supports_fw_dump = data->supports_fw_dump; 206 card->pcie.supports_fw_dump = data->supports_fw_dump;
207 card->pcie.can_ext_scan = data->can_ext_scan;
207 } 208 }
208 209
209 if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops, 210 if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops,
@@ -1952,8 +1953,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
1952 offset += txlen; 1953 offset += txlen;
1953 } while (true); 1954 } while (true);
1954 1955
1955 dev_dbg(adapter->dev, "info:\nFW download over, size %d bytes\n", 1956 dev_notice(adapter->dev,
1956 offset); 1957 "info: FW download over, size %d bytes\n", offset);
1957 1958
1958 ret = 0; 1959 ret = 0;
1959 1960
@@ -2064,6 +2065,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
2064 * state until cookie is set */ 2065 * state until cookie is set */
2065 adapter->ps_state = PS_STATE_AWAKE; 2066 adapter->ps_state = PS_STATE_AWAKE;
2066 adapter->pm_wakeup_fw_try = false; 2067 adapter->pm_wakeup_fw_try = false;
2068 del_timer(&adapter->wakeup_timer);
2067 } 2069 }
2068 } 2070 }
2069} 2071}
@@ -2562,6 +2564,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
2562 adapter->mem_type_mapping_tbl = mem_type_mapping_tbl; 2564 adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
2563 adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl); 2565 adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
2564 strcpy(adapter->fw_name, card->pcie.firmware); 2566 strcpy(adapter->fw_name, card->pcie.firmware);
2567 adapter->ext_scan = card->pcie.can_ext_scan;
2565 2568
2566 return 0; 2569 return 0;
2567} 2570}
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index 200e8b0cb582..666d40e9dbc3 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -206,6 +206,7 @@ struct mwifiex_pcie_device {
206 u16 blksz_fw_dl; 206 u16 blksz_fw_dl;
207 u16 tx_buf_size; 207 u16 tx_buf_size;
208 bool supports_fw_dump; 208 bool supports_fw_dump;
209 bool can_ext_scan;
209}; 210};
210 211
211static const struct mwifiex_pcie_device mwifiex_pcie8766 = { 212static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
@@ -214,6 +215,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
214 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, 215 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
215 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K, 216 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
216 .supports_fw_dump = false, 217 .supports_fw_dump = false,
218 .can_ext_scan = true,
217}; 219};
218 220
219static const struct mwifiex_pcie_device mwifiex_pcie8897 = { 221static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
@@ -222,6 +224,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
222 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, 224 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
223 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, 225 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
224 .supports_fw_dump = true, 226 .supports_fw_dump = true,
227 .can_ext_scan = true,
225}; 228};
226 229
227struct mwifiex_evt_buf_desc { 230struct mwifiex_evt_buf_desc {
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 984a7a4fa93b..0ffdb7c5afd2 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -496,10 +496,10 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
496 496
497 for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) { 497 for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) {
498 498
499 if (!priv->wdev->wiphy->bands[band]) 499 if (!priv->wdev.wiphy->bands[band])
500 continue; 500 continue;
501 501
502 sband = priv->wdev->wiphy->bands[band]; 502 sband = priv->wdev.wiphy->bands[band];
503 503
504 for (i = 0; (i < sband->n_channels) ; i++) { 504 for (i = 0; (i < sband->n_channels) ; i++) {
505 ch = &sband->channels[i]; 505 ch = &sband->channels[i];
@@ -1429,6 +1429,12 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
1429 return -EBUSY; 1429 return -EBUSY;
1430 } 1430 }
1431 1431
1432 if (adapter->surprise_removed || adapter->is_cmd_timedout) {
1433 dev_err(adapter->dev,
1434 "Ignore scan. Card removed or firmware in bad state\n");
1435 return -EFAULT;
1436 }
1437
1432 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); 1438 spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
1433 adapter->scan_processing = true; 1439 adapter->scan_processing = true;
1434 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); 1440 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
@@ -1727,10 +1733,10 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
1727 1733
1728 freq = cfp ? cfp->freq : 0; 1734 freq = cfp ? cfp->freq : 0;
1729 1735
1730 chan = ieee80211_get_channel(priv->wdev->wiphy, freq); 1736 chan = ieee80211_get_channel(priv->wdev.wiphy, freq);
1731 1737
1732 if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) { 1738 if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
1733 bss = cfg80211_inform_bss(priv->wdev->wiphy, 1739 bss = cfg80211_inform_bss(priv->wdev.wiphy,
1734 chan, CFG80211_BSS_FTYPE_UNKNOWN, 1740 chan, CFG80211_BSS_FTYPE_UNKNOWN,
1735 bssid, timestamp, 1741 bssid, timestamp,
1736 cap_info_bitmap, beacon_period, 1742 cap_info_bitmap, beacon_period,
@@ -1742,7 +1748,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
1742 !memcmp(bssid, priv->curr_bss_params.bss_descriptor 1748 !memcmp(bssid, priv->curr_bss_params.bss_descriptor
1743 .mac_address, ETH_ALEN)) 1749 .mac_address, ETH_ALEN))
1744 mwifiex_update_curr_bss_params(priv, bss); 1750 mwifiex_update_curr_bss_params(priv, bss);
1745 cfg80211_put_bss(priv->wdev->wiphy, bss); 1751 cfg80211_put_bss(priv->wdev.wiphy, bss);
1746 } 1752 }
1747 } else { 1753 } else {
1748 dev_dbg(adapter->dev, "missing BSS channel IE\n"); 1754 dev_dbg(adapter->dev, "missing BSS channel IE\n");
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 933dae137850..91e36cda9543 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -107,6 +107,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
107 card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size; 107 card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
108 card->supports_fw_dump = data->supports_fw_dump; 108 card->supports_fw_dump = data->supports_fw_dump;
109 card->auto_tdls = data->auto_tdls; 109 card->auto_tdls = data->auto_tdls;
110 card->can_ext_scan = data->can_ext_scan;
110 } 111 }
111 112
112 sdio_claim_host(func); 113 sdio_claim_host(func);
@@ -282,6 +283,9 @@ static int mwifiex_sdio_suspend(struct device *dev)
282#define SDIO_DEVICE_ID_MARVELL_8897 (0x912d) 283#define SDIO_DEVICE_ID_MARVELL_8897 (0x912d)
283/* Device ID for SD8887 */ 284/* Device ID for SD8887 */
284#define SDIO_DEVICE_ID_MARVELL_8887 (0x9135) 285#define SDIO_DEVICE_ID_MARVELL_8887 (0x9135)
286/* Device ID for SD8801 */
287#define SDIO_DEVICE_ID_MARVELL_8801 (0x9139)
288
285 289
286/* WLAN IDs */ 290/* WLAN IDs */
287static const struct sdio_device_id mwifiex_ids[] = { 291static const struct sdio_device_id mwifiex_ids[] = {
@@ -295,6 +299,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
295 .driver_data = (unsigned long) &mwifiex_sdio_sd8897}, 299 .driver_data = (unsigned long) &mwifiex_sdio_sd8897},
296 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8887), 300 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8887),
297 .driver_data = (unsigned long)&mwifiex_sdio_sd8887}, 301 .driver_data = (unsigned long)&mwifiex_sdio_sd8887},
302 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801),
303 .driver_data = (unsigned long)&mwifiex_sdio_sd8801},
298 {}, 304 {},
299}; 305};
300 306
@@ -986,8 +992,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
986 offset += txlen; 992 offset += txlen;
987 } while (true); 993 } while (true);
988 994
989 dev_dbg(adapter->dev, "info: FW download over, size %d bytes\n", 995 dev_notice(adapter->dev,
990 offset); 996 "info: FW download over, size %d bytes\n", offset);
991 997
992 ret = 0; 998 ret = 0;
993done: 999done:
@@ -1882,6 +1888,7 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
1882 } 1888 }
1883 1889
1884 adapter->auto_tdls = card->auto_tdls; 1890 adapter->auto_tdls = card->auto_tdls;
1891 adapter->ext_scan = card->can_ext_scan;
1885 return ret; 1892 return ret;
1886} 1893}
1887 1894
@@ -1958,8 +1965,8 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
1958 1965
1959 pr_err("Resetting card...\n"); 1966 pr_err("Resetting card...\n");
1960 mmc_remove_host(target); 1967 mmc_remove_host(target);
1961 /* 20ms delay is based on experiment with sdhci controller */ 1968 /* 200ms delay is based on experiment with sdhci controller */
1962 mdelay(20); 1969 mdelay(200);
1963 target->rescan_entered = 0; /* rescan non-removable cards */ 1970 target->rescan_entered = 0; /* rescan non-removable cards */
1964 mmc_add_host(target); 1971 mmc_add_host(target);
1965} 1972}
@@ -2023,6 +2030,8 @@ static void mwifiex_sdio_fw_dump_work(struct work_struct *work)
2023 u32 memory_size; 2030 u32 memory_size;
2024 static char *env[] = { "DRIVER=mwifiex_sdio", "EVENT=fw_dump", NULL }; 2031 static char *env[] = { "DRIVER=mwifiex_sdio", "EVENT=fw_dump", NULL };
2025 2032
2033 mwifiex_dump_drv_info(adapter);
2034
2026 if (!card->supports_fw_dump) 2035 if (!card->supports_fw_dump)
2027 return; 2036 return;
2028 2037
@@ -2166,6 +2175,99 @@ static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
2166 schedule_work(&adapter->iface_work); 2175 schedule_work(&adapter->iface_work);
2167} 2176}
2168 2177
2178/* Function to dump SDIO function registers and SDIO scratch registers in case
2179 * of FW crash
2180 */
2181static int
2182mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
2183{
2184 char *p = drv_buf;
2185 struct sdio_mmc_card *cardp = adapter->card;
2186 int ret = 0;
2187 u8 count, func, data, index = 0, size = 0;
2188 u8 reg, reg_start, reg_end;
2189 char buf[256], *ptr;
2190
2191 if (!p)
2192 return 0;
2193
2194 dev_info(adapter->dev, "SDIO register DUMP START\n");
2195
2196 mwifiex_pm_wakeup_card(adapter);
2197
2198 sdio_claim_host(cardp->func);
2199
2200 for (count = 0; count < 5; count++) {
2201 memset(buf, 0, sizeof(buf));
2202 ptr = buf;
2203
2204 switch (count) {
2205 case 0:
2206 /* Read the registers of SDIO function0 */
2207 func = count;
2208 reg_start = 0;
2209 reg_end = 9;
2210 break;
2211 case 1:
2212 /* Read the registers of SDIO function1 */
2213 func = count;
2214 reg_start = cardp->reg->func1_dump_reg_start;
2215 reg_end = cardp->reg->func1_dump_reg_end;
2216 break;
2217 case 2:
2218 index = 0;
2219 func = 1;
2220 reg_start = cardp->reg->func1_spec_reg_table[index++];
2221 size = cardp->reg->func1_spec_reg_num;
2222 reg_end = cardp->reg->func1_spec_reg_table[size-1];
2223 break;
2224 default:
2225 /* Read the scratch registers of SDIO function1 */
2226 if (count == 4)
2227 mdelay(100);
2228 func = 1;
2229 reg_start = cardp->reg->func1_scratch_reg;
2230 reg_end = reg_start + MWIFIEX_SDIO_SCRATCH_SIZE;
2231 }
2232
2233 if (count != 2)
2234 ptr += sprintf(ptr, "SDIO Func%d (%#x-%#x): ",
2235 func, reg_start, reg_end);
2236 else
2237 ptr += sprintf(ptr, "SDIO Func%d: ", func);
2238
2239 for (reg = reg_start; reg <= reg_end;) {
2240 if (func == 0)
2241 data = sdio_f0_readb(cardp->func, reg, &ret);
2242 else
2243 data = sdio_readb(cardp->func, reg, &ret);
2244
2245 if (count == 2)
2246 ptr += sprintf(ptr, "(%#x) ", reg);
2247 if (!ret) {
2248 ptr += sprintf(ptr, "%02x ", data);
2249 } else {
2250 ptr += sprintf(ptr, "ERR");
2251 break;
2252 }
2253
2254 if (count == 2 && reg < reg_end)
2255 reg = cardp->reg->func1_spec_reg_table[index++];
2256 else
2257 reg++;
2258 }
2259
2260 dev_info(adapter->dev, "%s\n", buf);
2261 p += sprintf(p, "%s\n", buf);
2262 }
2263
2264 sdio_release_host(cardp->func);
2265
2266 dev_info(adapter->dev, "SDIO register DUMP END\n");
2267
2268 return p - drv_buf;
2269}
2270
2169static struct mwifiex_if_ops sdio_ops = { 2271static struct mwifiex_if_ops sdio_ops = {
2170 .init_if = mwifiex_init_sdio, 2272 .init_if = mwifiex_init_sdio,
2171 .cleanup_if = mwifiex_cleanup_sdio, 2273 .cleanup_if = mwifiex_cleanup_sdio,
@@ -2188,6 +2290,7 @@ static struct mwifiex_if_ops sdio_ops = {
2188 .card_reset = mwifiex_sdio_card_reset, 2290 .card_reset = mwifiex_sdio_card_reset,
2189 .iface_work = mwifiex_sdio_work, 2291 .iface_work = mwifiex_sdio_work,
2190 .fw_dump = mwifiex_sdio_fw_dump, 2292 .fw_dump = mwifiex_sdio_fw_dump,
2293 .reg_dump = mwifiex_sdio_reg_dump,
2191}; 2294};
2192 2295
2193/* 2296/*
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 54c07156dd78..957cca246618 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -34,6 +34,7 @@
34#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin" 34#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin"
35#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin" 35#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
36#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin" 36#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
37#define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
37 38
38#define BLOCK_MODE 1 39#define BLOCK_MODE 1
39#define BYTE_MODE 0 40#define BYTE_MODE 0
@@ -44,6 +45,9 @@
44 45
45#define MWIFIEX_SDIO_BYTE_MODE_MASK 0x80000000 46#define MWIFIEX_SDIO_BYTE_MODE_MASK 0x80000000
46 47
48#define MWIFIEX_MAX_FUNC2_REG_NUM 13
49#define MWIFIEX_SDIO_SCRATCH_SIZE 10
50
47#define SDIO_MPA_ADDR_BASE 0x1000 51#define SDIO_MPA_ADDR_BASE 0x1000
48#define CTRL_PORT 0 52#define CTRL_PORT 0
49#define CTRL_PORT_MASK 0x0001 53#define CTRL_PORT_MASK 0x0001
@@ -219,6 +223,11 @@ struct mwifiex_sdio_card_reg {
219 u8 fw_dump_ctrl; 223 u8 fw_dump_ctrl;
220 u8 fw_dump_start; 224 u8 fw_dump_start;
221 u8 fw_dump_end; 225 u8 fw_dump_end;
226 u8 func1_dump_reg_start;
227 u8 func1_dump_reg_end;
228 u8 func1_scratch_reg;
229 u8 func1_spec_reg_num;
230 u8 func1_spec_reg_table[MWIFIEX_MAX_FUNC2_REG_NUM];
222}; 231};
223 232
224struct sdio_mmc_card { 233struct sdio_mmc_card {
@@ -247,6 +256,7 @@ struct sdio_mmc_card {
247 256
248 u8 *mp_regs; 257 u8 *mp_regs;
249 u8 auto_tdls; 258 u8 auto_tdls;
259 bool can_ext_scan;
250 260
251 struct mwifiex_sdio_mpa_tx mpa_tx; 261 struct mwifiex_sdio_mpa_tx mpa_tx;
252 struct mwifiex_sdio_mpa_rx mpa_rx; 262 struct mwifiex_sdio_mpa_rx mpa_rx;
@@ -264,6 +274,7 @@ struct mwifiex_sdio_device {
264 u32 mp_tx_agg_buf_size; 274 u32 mp_tx_agg_buf_size;
265 u32 mp_rx_agg_buf_size; 275 u32 mp_rx_agg_buf_size;
266 u8 auto_tdls; 276 u8 auto_tdls;
277 bool can_ext_scan;
267}; 278};
268 279
269static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = { 280static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -291,6 +302,11 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
291 .rd_len_p0_l = 0x08, 302 .rd_len_p0_l = 0x08,
292 .rd_len_p0_u = 0x09, 303 .rd_len_p0_u = 0x09,
293 .card_misc_cfg_reg = 0x6c, 304 .card_misc_cfg_reg = 0x6c,
305 .func1_dump_reg_start = 0x0,
306 .func1_dump_reg_end = 0x9,
307 .func1_scratch_reg = 0x60,
308 .func1_spec_reg_num = 5,
309 .func1_spec_reg_table = {0x28, 0x30, 0x34, 0x38, 0x3c},
294}; 310};
295 311
296static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = { 312static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
@@ -335,6 +351,12 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
335 .fw_dump_ctrl = 0xe2, 351 .fw_dump_ctrl = 0xe2,
336 .fw_dump_start = 0xe3, 352 .fw_dump_start = 0xe3,
337 .fw_dump_end = 0xea, 353 .fw_dump_end = 0xea,
354 .func1_dump_reg_start = 0x0,
355 .func1_dump_reg_end = 0xb,
356 .func1_scratch_reg = 0xc0,
357 .func1_spec_reg_num = 8,
358 .func1_spec_reg_table = {0x4C, 0x50, 0x54, 0x55, 0x58,
359 0x59, 0x5c, 0x5d},
338}; 360};
339 361
340static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = { 362static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
@@ -376,6 +398,13 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
376 .cmd_cfg_1 = 0xc5, 398 .cmd_cfg_1 = 0xc5,
377 .cmd_cfg_2 = 0xc6, 399 .cmd_cfg_2 = 0xc6,
378 .cmd_cfg_3 = 0xc7, 400 .cmd_cfg_3 = 0xc7,
401 .func1_dump_reg_start = 0x10,
402 .func1_dump_reg_end = 0x17,
403 .func1_scratch_reg = 0x90,
404 .func1_spec_reg_num = 13,
405 .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, 0x60,
406 0x61, 0x62, 0x64, 0x65, 0x66,
407 0x68, 0x69, 0x6a},
379}; 408};
380 409
381static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = { 410static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
@@ -390,6 +419,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
390 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, 419 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
391 .supports_fw_dump = false, 420 .supports_fw_dump = false,
392 .auto_tdls = false, 421 .auto_tdls = false,
422 .can_ext_scan = false,
393}; 423};
394 424
395static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = { 425static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -404,6 +434,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
404 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, 434 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
405 .supports_fw_dump = false, 435 .supports_fw_dump = false,
406 .auto_tdls = false, 436 .auto_tdls = false,
437 .can_ext_scan = true,
407}; 438};
408 439
409static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = { 440static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -418,6 +449,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
418 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K, 449 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
419 .supports_fw_dump = false, 450 .supports_fw_dump = false,
420 .auto_tdls = false, 451 .auto_tdls = false,
452 .can_ext_scan = true,
421}; 453};
422 454
423static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { 455static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -432,6 +464,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
432 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, 464 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
433 .supports_fw_dump = true, 465 .supports_fw_dump = true,
434 .auto_tdls = false, 466 .auto_tdls = false,
467 .can_ext_scan = true,
435}; 468};
436 469
437static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = { 470static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
@@ -446,6 +479,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
446 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, 479 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
447 .supports_fw_dump = false, 480 .supports_fw_dump = false,
448 .auto_tdls = true, 481 .auto_tdls = true,
482 .can_ext_scan = true,
483};
484
485static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
486 .firmware = SD8801_DEFAULT_FW_NAME,
487 .reg = &mwifiex_reg_sd87xx,
488 .max_ports = 16,
489 .mp_agg_pkt_limit = 8,
490 .supports_sdio_new_mode = false,
491 .has_control_mask = true,
492 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
493 .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
494 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
495 .supports_fw_dump = false,
496 .auto_tdls = false,
497 .can_ext_scan = true,
449}; 498};
450 499
451/* 500/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 1c2ca291d1f5..f7d204ffd6e9 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -26,6 +26,10 @@
26#include "11n.h" 26#include "11n.h"
27#include "11ac.h" 27#include "11ac.h"
28 28
29static bool disable_auto_ds;
30module_param(disable_auto_ds, bool, 0);
31MODULE_PARM_DESC(disable_auto_ds,
32 "deepsleep enabled=0(default), deepsleep disabled=1");
29/* 33/*
30 * This function prepares command to set/get RSSI information. 34 * This function prepares command to set/get RSSI information.
31 * 35 *
@@ -1893,6 +1897,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1893 case HostCmd_CMD_TDLS_OPER: 1897 case HostCmd_CMD_TDLS_OPER:
1894 ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf); 1898 ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf);
1895 break; 1899 break;
1900 case HostCmd_CMD_CHAN_REPORT_REQUEST:
1901 ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr,
1902 data_buf);
1903 break;
1896 default: 1904 default:
1897 dev_err(priv->adapter->dev, 1905 dev_err(priv->adapter->dev,
1898 "PREP_CMD: unknown cmd- %#x\n", cmd_no); 1906 "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -1907,6 +1915,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1907 * 1915 *
1908 * This is called after firmware download to bring the card to 1916 * This is called after firmware download to bring the card to
1909 * working state. 1917 * working state.
1918 * Function is also called during reinitialization of virtual
1919 * interfaces.
1910 * 1920 *
1911 * The following commands are issued sequentially - 1921 * The following commands are issued sequentially -
1912 * - Set PCI-Express host buffer configuration (PCIE only) 1922 * - Set PCI-Express host buffer configuration (PCIE only)
@@ -1921,7 +1931,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1921 * - Set 11d control 1931 * - Set 11d control
1922 * - Set MAC control (this must be the last command to initialize firmware) 1932 * - Set MAC control (this must be the last command to initialize firmware)
1923 */ 1933 */
1924int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta) 1934int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
1925{ 1935{
1926 struct mwifiex_adapter *adapter = priv->adapter; 1936 struct mwifiex_adapter *adapter = priv->adapter;
1927 int ret; 1937 int ret;
@@ -2031,7 +2041,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
2031 if (ret) 2041 if (ret)
2032 return -1; 2042 return -1;
2033 2043
2034 if (first_sta && priv->adapter->iface_type != MWIFIEX_USB && 2044 if (!disable_auto_ds &&
2045 first_sta && priv->adapter->iface_type != MWIFIEX_USB &&
2035 priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { 2046 priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
2036 /* Enable auto deep sleep */ 2047 /* Enable auto deep sleep */
2037 auto_ds.auto_ds = DEEP_SLEEP_ON; 2048 auto_ds.auto_ds = DEEP_SLEEP_ON;
@@ -2054,9 +2065,6 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
2054 "11D: failed to enable 11D\n"); 2065 "11D: failed to enable 11D\n");
2055 } 2066 }
2056 2067
2057 /* set last_init_cmd before sending the command */
2058 priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
2059
2060 /* Send cmd to FW to configure 11n specific configuration 2068 /* Send cmd to FW to configure 11n specific configuration
2061 * (Short GI, Channel BW, Green field support etc.) for transmit 2069 * (Short GI, Channel BW, Green field support etc.) for transmit
2062 */ 2070 */
@@ -2064,7 +2072,11 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
2064 ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_CFG, 2072 ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_CFG,
2065 HostCmd_ACT_GEN_SET, 0, &tx_cfg, true); 2073 HostCmd_ACT_GEN_SET, 0, &tx_cfg, true);
2066 2074
2067 ret = -EINPROGRESS; 2075 if (init) {
2076 /* set last_init_cmd before sending the command */
2077 priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
2078 ret = -EINPROGRESS;
2079 }
2068 2080
2069 return ret; 2081 return ret;
2070} 2082}
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index b65e1014b0fc..5f8da5924666 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -248,6 +248,8 @@ static int mwifiex_ret_get_log(struct mwifiex_private *priv,
248 le32_to_cpu(get_log->wep_icv_err_cnt[2]); 248 le32_to_cpu(get_log->wep_icv_err_cnt[2]);
249 stats->wep_icv_error[3] = 249 stats->wep_icv_error[3] =
250 le32_to_cpu(get_log->wep_icv_err_cnt[3]); 250 le32_to_cpu(get_log->wep_icv_err_cnt[3]);
251 stats->bcn_rcv_cnt = le32_to_cpu(get_log->bcn_rcv_cnt);
252 stats->bcn_miss_cnt = le32_to_cpu(get_log->bcn_miss_cnt);
251 } 253 }
252 254
253 return 0; 255 return 0;
@@ -1103,6 +1105,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
1103 case HostCmd_CMD_UAP_SYS_CONFIG: 1105 case HostCmd_CMD_UAP_SYS_CONFIG:
1104 break; 1106 break;
1105 case HostCmd_CMD_UAP_BSS_START: 1107 case HostCmd_CMD_UAP_BSS_START:
1108 adapter->tx_lock_flag = false;
1109 adapter->pps_uapsd_mode = false;
1110 adapter->delay_null_pkt = false;
1106 priv->bss_started = 1; 1111 priv->bss_started = 1;
1107 break; 1112 break;
1108 case HostCmd_CMD_UAP_BSS_STOP: 1113 case HostCmd_CMD_UAP_BSS_STOP:
@@ -1117,6 +1122,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
1117 case HostCmd_CMD_TDLS_OPER: 1122 case HostCmd_CMD_TDLS_OPER:
1118 ret = mwifiex_ret_tdls_oper(priv, resp); 1123 ret = mwifiex_ret_tdls_oper(priv, resp);
1119 break; 1124 break;
1125 case HostCmd_CMD_CHAN_REPORT_REQUEST:
1126 break;
1120 default: 1127 default:
1121 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n", 1128 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
1122 resp->command); 1129 resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index b8c171df6223..80ffe7412496 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -90,6 +90,10 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
90 priv->is_data_rate_auto = true; 90 priv->is_data_rate_auto = true;
91 priv->data_rate = 0; 91 priv->data_rate = 0;
92 92
93 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
94 GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) && priv->hist_data)
95 mwifiex_hist_data_reset(priv);
96
93 if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { 97 if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
94 priv->adhoc_state = ADHOC_IDLE; 98 priv->adhoc_state = ADHOC_IDLE;
95 priv->adhoc_is_link_sensed = false; 99 priv->adhoc_is_link_sensed = false;
@@ -308,6 +312,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
308 adapter->ps_state = PS_STATE_AWAKE; 312 adapter->ps_state = PS_STATE_AWAKE;
309 adapter->pm_wakeup_card_req = false; 313 adapter->pm_wakeup_card_req = false;
310 adapter->pm_wakeup_fw_try = false; 314 adapter->pm_wakeup_fw_try = false;
315 del_timer_sync(&adapter->wakeup_timer);
311 break; 316 break;
312 } 317 }
313 if (!mwifiex_send_null_packet 318 if (!mwifiex_send_null_packet
@@ -322,6 +327,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
322 adapter->ps_state = PS_STATE_AWAKE; 327 adapter->ps_state = PS_STATE_AWAKE;
323 adapter->pm_wakeup_card_req = false; 328 adapter->pm_wakeup_card_req = false;
324 adapter->pm_wakeup_fw_try = false; 329 adapter->pm_wakeup_fw_try = false;
330 del_timer_sync(&adapter->wakeup_timer);
325 331
326 break; 332 break;
327 333
@@ -480,7 +486,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
480 486
481 case EVENT_REMAIN_ON_CHAN_EXPIRED: 487 case EVENT_REMAIN_ON_CHAN_EXPIRED:
482 dev_dbg(adapter->dev, "event: Remain on channel expired\n"); 488 dev_dbg(adapter->dev, "event: Remain on channel expired\n");
483 cfg80211_remain_on_channel_expired(priv->wdev, 489 cfg80211_remain_on_channel_expired(&priv->wdev,
484 priv->roc_cfg.cookie, 490 priv->roc_cfg.cookie,
485 &priv->roc_cfg.chan, 491 &priv->roc_cfg.chan,
486 GFP_ATOMIC); 492 GFP_ATOMIC);
@@ -509,6 +515,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
509 mwifiex_parse_tx_status_event(priv, adapter->event_body); 515 mwifiex_parse_tx_status_event(priv, adapter->event_body);
510 break; 516 break;
511 517
518 case EVENT_CHANNEL_REPORT_RDY:
519 dev_dbg(adapter->dev, "event: Channel Report\n");
520 ret = mwifiex_11h_handle_chanrpt_ready(priv,
521 adapter->event_skb);
522 break;
523 case EVENT_RADAR_DETECTED:
524 dev_dbg(adapter->dev, "event: Radar detected\n");
525 ret = mwifiex_11h_handle_radar_detected(priv,
526 adapter->event_skb);
527 break;
512 default: 528 default:
513 dev_dbg(adapter->dev, "event: unknown event id: %#x\n", 529 dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
514 eventcause); 530 eventcause);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 1626868a4b5c..0599e41e253c 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -219,7 +219,7 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
219 219
220 if (!strncmp(priv->adapter->country_code, &country_ie[2], 2)) { 220 if (!strncmp(priv->adapter->country_code, &country_ie[2], 2)) {
221 rcu_read_unlock(); 221 rcu_read_unlock();
222 wiphy_dbg(priv->wdev->wiphy, 222 wiphy_dbg(priv->wdev.wiphy,
223 "11D: skip setting domain info in FW\n"); 223 "11D: skip setting domain info in FW\n");
224 return 0; 224 return 0;
225 } 225 }
@@ -902,9 +902,12 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
902 if (wep_key->key_length) { 902 if (wep_key->key_length) {
903 void *enc_key; 903 void *enc_key;
904 904
905 if (encrypt_key->key_disable) 905 if (encrypt_key->key_disable) {
906 memset(&priv->wep_key[index], 0, 906 memset(&priv->wep_key[index], 0,
907 sizeof(struct mwifiex_wep_key)); 907 sizeof(struct mwifiex_wep_key));
908 if (wep_key->key_length)
909 goto done;
910 }
908 911
909 if (adapter->key_api_major_ver == KEY_API_VER_MAJOR_V2) 912 if (adapter->key_api_major_ver == KEY_API_VER_MAJOR_V2)
910 enc_key = encrypt_key; 913 enc_key = encrypt_key;
@@ -918,6 +921,7 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
918 return ret; 921 return ret;
919 } 922 }
920 923
924done:
921 if (priv->sec_info.wep_enabled) 925 if (priv->sec_info.wep_enabled)
922 priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE; 926 priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
923 else 927 else
@@ -1131,36 +1135,6 @@ mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
1131 return roc_cfg.status; 1135 return roc_cfg.status;
1132} 1136}
1133 1137
1134int
1135mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role)
1136{
1137 if (GET_BSS_ROLE(priv) == bss_role) {
1138 dev_dbg(priv->adapter->dev,
1139 "info: already in the desired role.\n");
1140 return 0;
1141 }
1142
1143 mwifiex_free_priv(priv);
1144 mwifiex_init_priv(priv);
1145
1146 priv->bss_role = bss_role;
1147 switch (bss_role) {
1148 case MWIFIEX_BSS_ROLE_UAP:
1149 priv->bss_mode = NL80211_IFTYPE_AP;
1150 break;
1151 case MWIFIEX_BSS_ROLE_STA:
1152 case MWIFIEX_BSS_ROLE_ANY:
1153 default:
1154 priv->bss_mode = NL80211_IFTYPE_STATION;
1155 break;
1156 }
1157
1158 mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
1159 HostCmd_ACT_GEN_SET, 0, NULL, true);
1160
1161 return mwifiex_sta_init_cmd(priv, false);
1162}
1163
1164/* 1138/*
1165 * Sends IOCTL request to get statistics information. 1139 * Sends IOCTL request to get statistics information.
1166 * 1140 *
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index c2ad3b63ae70..b8729c9394e9 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -90,6 +90,7 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
90 struct ethhdr *eth; 90 struct ethhdr *eth;
91 u16 rx_pkt_off, rx_pkt_len; 91 u16 rx_pkt_off, rx_pkt_len;
92 u8 *offset; 92 u8 *offset;
93 u8 adj_rx_rate = 0;
93 94
94 local_rx_pd = (struct rxpd *) (skb->data); 95 local_rx_pd = (struct rxpd *) (skb->data);
95 96
@@ -155,6 +156,14 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
155 156
156 priv->rxpd_htinfo = local_rx_pd->ht_info; 157 priv->rxpd_htinfo = local_rx_pd->ht_info;
157 158
159 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
160 GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
161 adj_rx_rate = mwifiex_adjust_data_rate(priv, priv->rxpd_rate,
162 priv->rxpd_htinfo);
163 mwifiex_hist_data_add(priv, adj_rx_rate, local_rx_pd->snr,
164 local_rx_pd->nf);
165 }
166
158 ret = mwifiex_recv_packet(priv, skb); 167 ret = mwifiex_recv_packet(priv, skb);
159 if (ret == -1) 168 if (ret == -1)
160 dev_err(priv->adapter->dev, "recv packet failed\n"); 169 dev_err(priv->adapter->dev, "recv packet failed\n");
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index b896d7375b52..5ce2d9a4f919 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -47,8 +47,10 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
47 struct mwifiex_adapter *adapter = priv->adapter; 47 struct mwifiex_adapter *adapter = priv->adapter;
48 struct txpd *local_tx_pd; 48 struct txpd *local_tx_pd;
49 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb); 49 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
50 u8 pad; 50 unsigned int pad;
51 u16 pkt_type, pkt_offset; 51 u16 pkt_type, pkt_offset;
52 int hroom = (priv->adapter->iface_type == MWIFIEX_USB) ? 0 :
53 INTF_HEADER_LEN;
52 54
53 if (!skb->len) { 55 if (!skb->len) {
54 dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len); 56 dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
@@ -56,13 +58,12 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
56 return skb->data; 58 return skb->data;
57 } 59 }
58 60
59 pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0; 61 BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN);
60 62
61 /* If skb->data is not aligned; add padding */ 63 pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
62 pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;
63 64
64 BUG_ON(skb_headroom(skb) < (sizeof(*local_tx_pd) + INTF_HEADER_LEN 65 pad = ((void *)skb->data - (sizeof(*local_tx_pd) + hroom)-
65 + pad)); 66 NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1);
66 skb_push(skb, sizeof(*local_tx_pd) + pad); 67 skb_push(skb, sizeof(*local_tx_pd) + pad);
67 68
68 local_tx_pd = (struct txpd *) skb->data; 69 local_tx_pd = (struct txpd *) skb->data;
@@ -70,8 +71,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
70 local_tx_pd->bss_num = priv->bss_num; 71 local_tx_pd->bss_num = priv->bss_num;
71 local_tx_pd->bss_type = priv->bss_type; 72 local_tx_pd->bss_type = priv->bss_type;
72 local_tx_pd->tx_pkt_length = cpu_to_le16((u16)(skb->len - 73 local_tx_pd->tx_pkt_length = cpu_to_le16((u16)(skb->len -
73 (sizeof(struct txpd) 74 (sizeof(struct txpd) +
74 + pad))); 75 pad)));
75 76
76 local_tx_pd->priority = (u8) skb->priority; 77 local_tx_pd->priority = (u8) skb->priority;
77 local_tx_pd->pkt_delay_2ms = 78 local_tx_pd->pkt_delay_2ms =
@@ -115,7 +116,7 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
115 local_tx_pd->tx_pkt_offset = cpu_to_le16(pkt_offset); 116 local_tx_pd->tx_pkt_offset = cpu_to_le16(pkt_offset);
116 117
117 /* make space for INTF_HEADER_LEN */ 118 /* make space for INTF_HEADER_LEN */
118 skb_push(skb, INTF_HEADER_LEN); 119 skb_push(skb, hroom);
119 120
120 if (!local_tx_pd->tx_control) 121 if (!local_tx_pd->tx_control)
121 /* TxCtrl set by user or default */ 122 /* TxCtrl set by user or default */
@@ -182,9 +183,13 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
182 } 183 }
183 switch (ret) { 184 switch (ret) {
184 case -EBUSY: 185 case -EBUSY:
185 adapter->data_sent = true; 186 dev_kfree_skb_any(skb);
186 /* Fall through FAILURE handling */ 187 dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
188 __func__, ret);
189 adapter->dbg.num_tx_host_to_card_failure++;
190 break;
187 case -1: 191 case -1:
192 adapter->data_sent = false;
188 dev_kfree_skb_any(skb); 193 dev_kfree_skb_any(skb);
189 dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n", 194 dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
190 __func__, ret); 195 __func__, ret);
@@ -197,6 +202,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
197 adapter->tx_lock_flag = true; 202 adapter->tx_lock_flag = true;
198 break; 203 break;
199 case -EINPROGRESS: 204 case -EINPROGRESS:
205 adapter->tx_lock_flag = true;
200 break; 206 break;
201 default: 207 default:
202 break; 208 break;
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index 22884b429be7..087d84762cd3 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -1123,6 +1123,36 @@ int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac)
1123 return TDLS_NOT_SETUP; 1123 return TDLS_NOT_SETUP;
1124} 1124}
1125 1125
1126int mwifiex_get_tdls_list(struct mwifiex_private *priv,
1127 struct tdls_peer_info *buf)
1128{
1129 struct mwifiex_sta_node *sta_ptr;
1130 struct tdls_peer_info *peer = buf;
1131 int count = 0;
1132 unsigned long flags;
1133
1134 if (!ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
1135 return 0;
1136
1137 /* make sure we are in station mode and connected */
1138 if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
1139 return 0;
1140
1141 spin_lock_irqsave(&priv->sta_list_spinlock, flags);
1142 list_for_each_entry(sta_ptr, &priv->sta_list, list) {
1143 if (sta_ptr->tdls_status == TDLS_SETUP_COMPLETE) {
1144 ether_addr_copy(peer->peer_addr, sta_ptr->mac_addr);
1145 peer++;
1146 count++;
1147 if (count >= MWIFIEX_MAX_TDLS_PEER_SUPPORTED)
1148 break;
1149 }
1150 }
1151 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
1152
1153 return count;
1154}
1155
1126void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv) 1156void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
1127{ 1157{
1128 struct mwifiex_sta_node *sta_ptr; 1158 struct mwifiex_sta_node *sta_ptr;
@@ -1367,9 +1397,8 @@ void mwifiex_check_auto_tdls(unsigned long context)
1367 1397
1368void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv) 1398void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv)
1369{ 1399{
1370 init_timer(&priv->auto_tdls_timer); 1400 setup_timer(&priv->auto_tdls_timer, mwifiex_check_auto_tdls,
1371 priv->auto_tdls_timer.function = mwifiex_check_auto_tdls; 1401 (unsigned long)priv);
1372 priv->auto_tdls_timer.data = (unsigned long)priv;
1373 priv->auto_tdls_timer_active = true; 1402 priv->auto_tdls_timer_active = true;
1374 mod_timer(&priv->auto_tdls_timer, 1403 mod_timer(&priv->auto_tdls_timer,
1375 jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S)); 1404 jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 6ae133333363..ac93557cbdc9 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -227,7 +227,7 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
227 /* consumes ack_skb */ 227 /* consumes ack_skb */
228 skb_complete_wifi_ack(ack_skb, !tx_status->status); 228 skb_complete_wifi_ack(ack_skb, !tx_status->status);
229 } else { 229 } else {
230 cfg80211_mgmt_tx_status(priv->wdev, tx_info->cookie, 230 cfg80211_mgmt_tx_status(&priv->wdev, tx_info->cookie,
231 ack_skb->data, ack_skb->len, 231 ack_skb->data, ack_skb->len,
232 !tx_status->status, GFP_ATOMIC); 232 !tx_status->status, GFP_ATOMIC);
233 dev_kfree_skb_any(ack_skb); 233 dev_kfree_skb_any(ack_skb);
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 0f347fdefa0a..f5c2af01ba0a 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -761,6 +761,11 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
761 if (mwifiex_cmd_uap_sta_deauth(priv, cmd, data_buf)) 761 if (mwifiex_cmd_uap_sta_deauth(priv, cmd, data_buf))
762 return -1; 762 return -1;
763 break; 763 break;
764 case HostCmd_CMD_CHAN_REPORT_REQUEST:
765 if (mwifiex_cmd_issue_chan_report_request(priv, cmd_buf,
766 data_buf))
767 return -1;
768 break;
764 default: 769 default:
765 dev_err(priv->adapter->dev, 770 dev_err(priv->adapter->dev,
766 "PREP_CMD: unknown cmd %#x\n", cmd_no); 771 "PREP_CMD: unknown cmd %#x\n", cmd_no);
@@ -769,3 +774,68 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
769 774
770 return 0; 775 return 0;
771} 776}
777
778void mwifiex_uap_set_channel(struct mwifiex_uap_bss_param *bss_cfg,
779 struct cfg80211_chan_def chandef)
780{
781 u8 config_bands = 0;
782
783 bss_cfg->channel = ieee80211_frequency_to_channel(
784 chandef.chan->center_freq);
785
786 /* Set appropriate bands */
787 if (chandef.chan->band == IEEE80211_BAND_2GHZ) {
788 bss_cfg->band_cfg = BAND_CONFIG_BG;
789 config_bands = BAND_B | BAND_G;
790
791 if (chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
792 config_bands |= BAND_GN;
793 } else {
794 bss_cfg->band_cfg = BAND_CONFIG_A;
795 config_bands = BAND_A;
796
797 if (chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
798 config_bands |= BAND_AN;
799
800 if (chandef.width > NL80211_CHAN_WIDTH_40)
801 config_bands |= BAND_AAC;
802 }
803}
804
805int mwifiex_config_start_uap(struct mwifiex_private *priv,
806 struct mwifiex_uap_bss_param *bss_cfg)
807{
808 if (mwifiex_del_mgmt_ies(priv))
809 dev_err(priv->adapter->dev, "Failed to delete mgmt IEs!\n");
810
811 if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
812 HostCmd_ACT_GEN_SET, 0, NULL, true)) {
813 dev_err(priv->adapter->dev, "Failed to stop the BSS\n");
814 return -1;
815 }
816
817 if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
818 HostCmd_ACT_GEN_SET,
819 UAP_BSS_PARAMS_I, bss_cfg, false)) {
820 dev_err(priv->adapter->dev, "Failed to set the SSID\n");
821 return -1;
822 }
823
824 if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
825 HostCmd_ACT_GEN_SET, 0, NULL, false)) {
826 dev_err(priv->adapter->dev, "Failed to start the BSS\n");
827 return -1;
828 }
829
830 if (priv->sec_info.wep_enabled)
831 priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
832 else
833 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
834
835 if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
836 HostCmd_ACT_GEN_SET, 0,
837 &priv->curr_pkt_filter, true))
838 return -1;
839
840 return 0;
841}
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index c54a537e31fb..f4794cdc36d2 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -68,7 +68,6 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
68 len = ETH_ALEN; 68 len = ETH_ALEN;
69 69
70 if (len != -1) { 70 if (len != -1) {
71 sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
72 sinfo.assoc_req_ies = &event->data[len]; 71 sinfo.assoc_req_ies = &event->data[len];
73 len = (u8 *)sinfo.assoc_req_ies - 72 len = (u8 *)sinfo.assoc_req_ies -
74 (u8 *)&event->frame_control; 73 (u8 *)&event->frame_control;
@@ -132,6 +131,8 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
132 dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause); 131 dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
133 memcpy(priv->netdev->dev_addr, adapter->event_body + 2, 132 memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
134 ETH_ALEN); 133 ETH_ALEN);
134 if (priv->hist_data)
135 mwifiex_hist_data_reset(priv);
135 break; 136 break;
136 case EVENT_UAP_MIC_COUNTERMEASURES: 137 case EVENT_UAP_MIC_COUNTERMEASURES:
137 /* For future development */ 138 /* For future development */
@@ -177,6 +178,53 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
177 dev_dbg(adapter->dev, "event: TX_STATUS Report\n"); 178 dev_dbg(adapter->dev, "event: TX_STATUS Report\n");
178 mwifiex_parse_tx_status_event(priv, adapter->event_body); 179 mwifiex_parse_tx_status_event(priv, adapter->event_body);
179 break; 180 break;
181 case EVENT_PS_SLEEP:
182 dev_dbg(adapter->dev, "info: EVENT: SLEEP\n");
183
184 adapter->ps_state = PS_STATE_PRE_SLEEP;
185
186 mwifiex_check_ps_cond(adapter);
187 break;
188
189 case EVENT_PS_AWAKE:
190 dev_dbg(adapter->dev, "info: EVENT: AWAKE\n");
191 if (!adapter->pps_uapsd_mode &&
192 priv->media_connected && adapter->sleep_period.period) {
193 adapter->pps_uapsd_mode = true;
194 dev_dbg(adapter->dev,
195 "event: PPS/UAPSD mode activated\n");
196 }
197 adapter->tx_lock_flag = false;
198 if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
199 if (mwifiex_check_last_packet_indication(priv)) {
200 if (adapter->data_sent) {
201 adapter->ps_state = PS_STATE_AWAKE;
202 adapter->pm_wakeup_card_req = false;
203 adapter->pm_wakeup_fw_try = false;
204 break;
205 }
206 if (!mwifiex_send_null_packet
207 (priv,
208 MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET |
209 MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET))
210 adapter->ps_state =
211 PS_STATE_SLEEP;
212 return 0;
213 }
214 }
215 adapter->ps_state = PS_STATE_AWAKE;
216 adapter->pm_wakeup_card_req = false;
217 adapter->pm_wakeup_fw_try = false;
218 break;
219
220 case EVENT_CHANNEL_REPORT_RDY:
221 dev_dbg(adapter->dev, "event: Channel Report\n");
222 mwifiex_11h_handle_chanrpt_ready(priv, adapter->event_skb);
223 break;
224 case EVENT_RADAR_DETECTED:
225 dev_dbg(adapter->dev, "event: Radar detected\n");
226 mwifiex_11h_handle_radar_detected(priv, adapter->event_skb);
227 break;
180 default: 228 default:
181 dev_dbg(adapter->dev, "event: unknown event id: %#x\n", 229 dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
182 eventcause); 230 eventcause);
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index be3a203a529b..38ac4d74c486 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -348,8 +348,10 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
348 struct mwifiex_adapter *adapter = priv->adapter; 348 struct mwifiex_adapter *adapter = priv->adapter;
349 struct uap_txpd *txpd; 349 struct uap_txpd *txpd;
350 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb); 350 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
351 int pad, len; 351 int pad;
352 u16 pkt_type; 352 u16 pkt_type, pkt_offset;
353 int hroom = (priv->adapter->iface_type == MWIFIEX_USB) ? 0 :
354 INTF_HEADER_LEN;
353 355
354 if (!skb->len) { 356 if (!skb->len) {
355 dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len); 357 dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
@@ -357,22 +359,21 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
357 return skb->data; 359 return skb->data;
358 } 360 }
359 361
360 pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0; 362 BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN);
361
362 /* If skb->data is not aligned, add padding */
363 pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;
364 363
365 len = sizeof(*txpd) + pad; 364 pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;
366 365
367 BUG_ON(skb_headroom(skb) < len + INTF_HEADER_LEN); 366 pad = ((void *)skb->data - (sizeof(*txpd) + hroom) - NULL) &
367 (MWIFIEX_DMA_ALIGN_SZ - 1);
368 368
369 skb_push(skb, len); 369 skb_push(skb, sizeof(*txpd) + pad);
370 370
371 txpd = (struct uap_txpd *)skb->data; 371 txpd = (struct uap_txpd *)skb->data;
372 memset(txpd, 0, sizeof(*txpd)); 372 memset(txpd, 0, sizeof(*txpd));
373 txpd->bss_num = priv->bss_num; 373 txpd->bss_num = priv->bss_num;
374 txpd->bss_type = priv->bss_type; 374 txpd->bss_type = priv->bss_type;
375 txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - len)); 375 txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - (sizeof(*txpd) +
376 pad)));
376 txpd->priority = (u8)skb->priority; 377 txpd->priority = (u8)skb->priority;
377 378
378 txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb); 379 txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
@@ -392,16 +393,17 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
392 cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[txpd->priority]); 393 cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[txpd->priority]);
393 394
394 /* Offset of actual data */ 395 /* Offset of actual data */
396 pkt_offset = sizeof(*txpd) + pad;
395 if (pkt_type == PKT_TYPE_MGMT) { 397 if (pkt_type == PKT_TYPE_MGMT) {
396 /* Set the packet type and add header for management frame */ 398 /* Set the packet type and add header for management frame */
397 txpd->tx_pkt_type = cpu_to_le16(pkt_type); 399 txpd->tx_pkt_type = cpu_to_le16(pkt_type);
398 len += MWIFIEX_MGMT_FRAME_HEADER_SIZE; 400 pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
399 } 401 }
400 402
401 txpd->tx_pkt_offset = cpu_to_le16(len); 403 txpd->tx_pkt_offset = cpu_to_le16(pkt_offset);
402 404
403 /* make space for INTF_HEADER_LEN */ 405 /* make space for INTF_HEADER_LEN */
404 skb_push(skb, INTF_HEADER_LEN); 406 skb_push(skb, hroom);
405 407
406 if (!txpd->tx_control) 408 if (!txpd->tx_control)
407 /* TxCtrl set by user or default */ 409 /* TxCtrl set by user or default */
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 1b56495ec872..223873022ffe 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -37,6 +37,11 @@ static struct usb_device_id mwifiex_usb_table[] = {
37 {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8797_PID_2, 37 {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8797_PID_2,
38 USB_CLASS_VENDOR_SPEC, 38 USB_CLASS_VENDOR_SPEC,
39 USB_SUBCLASS_VENDOR_SPEC, 0xff)}, 39 USB_SUBCLASS_VENDOR_SPEC, 0xff)},
40 /* 8801 */
41 {USB_DEVICE(USB8XXX_VID, USB8801_PID_1)},
42 {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8801_PID_2,
43 USB_CLASS_VENDOR_SPEC,
44 USB_SUBCLASS_VENDOR_SPEC, 0xff)},
40 /* 8897 */ 45 /* 8897 */
41 {USB_DEVICE(USB8XXX_VID, USB8897_PID_1)}, 46 {USB_DEVICE(USB8XXX_VID, USB8897_PID_1)},
42 {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2, 47 {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
@@ -361,11 +366,13 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
361 switch (id_product) { 366 switch (id_product) {
362 case USB8766_PID_1: 367 case USB8766_PID_1:
363 case USB8797_PID_1: 368 case USB8797_PID_1:
369 case USB8801_PID_1:
364 case USB8897_PID_1: 370 case USB8897_PID_1:
365 card->usb_boot_state = USB8XXX_FW_DNLD; 371 card->usb_boot_state = USB8XXX_FW_DNLD;
366 break; 372 break;
367 case USB8766_PID_2: 373 case USB8766_PID_2:
368 case USB8797_PID_2: 374 case USB8797_PID_2:
375 case USB8801_PID_2:
369 case USB8897_PID_2: 376 case USB8897_PID_2:
370 card->usb_boot_state = USB8XXX_FW_READY; 377 card->usb_boot_state = USB8XXX_FW_READY;
371 break; 378 break;
@@ -792,11 +799,19 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
792 case USB8897_PID_2: 799 case USB8897_PID_2:
793 adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K; 800 adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
794 strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME); 801 strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
802 adapter->ext_scan = true;
795 break; 803 break;
796 case USB8766_PID_1: 804 case USB8766_PID_1:
797 case USB8766_PID_2: 805 case USB8766_PID_2:
798 adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K; 806 adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
799 strcpy(adapter->fw_name, USB8766_DEFAULT_FW_NAME); 807 strcpy(adapter->fw_name, USB8766_DEFAULT_FW_NAME);
808 adapter->ext_scan = true;
809 break;
810 case USB8801_PID_1:
811 case USB8801_PID_2:
812 adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
813 strcpy(adapter->fw_name, USB8801_DEFAULT_FW_NAME);
814 adapter->ext_scan = false;
800 break; 815 break;
801 case USB8797_PID_1: 816 case USB8797_PID_1:
802 case USB8797_PID_2: 817 case USB8797_PID_2:
@@ -930,7 +945,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
930 } while ((dnld_cmd != FW_HAS_LAST_BLOCK) && retries); 945 } while ((dnld_cmd != FW_HAS_LAST_BLOCK) && retries);
931 946
932cleanup: 947cleanup:
933 dev_dbg(adapter->dev, "%s: %d bytes downloaded\n", __func__, tlen); 948 dev_notice(adapter->dev,
949 "info: FW download over, size %d bytes\n", tlen);
934 950
935 kfree(recv_buff); 951 kfree(recv_buff);
936 kfree(fwdata); 952 kfree(fwdata);
@@ -990,6 +1006,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
990{ 1006{
991 /* Simulation of HS_AWAKE event */ 1007 /* Simulation of HS_AWAKE event */
992 adapter->pm_wakeup_fw_try = false; 1008 adapter->pm_wakeup_fw_try = false;
1009 del_timer_sync(&adapter->wakeup_timer);
993 adapter->pm_wakeup_card_req = false; 1010 adapter->pm_wakeup_card_req = false;
994 adapter->ps_state = PS_STATE_AWAKE; 1011 adapter->ps_state = PS_STATE_AWAKE;
995 1012
@@ -1010,6 +1027,13 @@ static void mwifiex_usb_submit_rem_rx_urbs(struct mwifiex_adapter *adapter)
1010 } 1027 }
1011} 1028}
1012 1029
1030/* This function is called after the card has woken up. */
1031static inline int
1032mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
1033{
1034 return 0;
1035}
1036
1013static struct mwifiex_if_ops usb_ops = { 1037static struct mwifiex_if_ops usb_ops = {
1014 .register_dev = mwifiex_register_dev, 1038 .register_dev = mwifiex_register_dev,
1015 .unregister_dev = mwifiex_unregister_dev, 1039 .unregister_dev = mwifiex_unregister_dev,
@@ -1074,4 +1098,5 @@ MODULE_VERSION(USB_VERSION);
1074MODULE_LICENSE("GPL v2"); 1098MODULE_LICENSE("GPL v2");
1075MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME); 1099MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME);
1076MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME); 1100MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
1101MODULE_FIRMWARE(USB8801_DEFAULT_FW_NAME);
1077MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME); 1102MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
index a7cbba1355af..57e1a5736318 100644
--- a/drivers/net/wireless/mwifiex/usb.h
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -30,6 +30,9 @@
30#define USB8797_PID_2 0x2044 30#define USB8797_PID_2 0x2044
31#define USB8897_PID_1 0x2045 31#define USB8897_PID_1 0x2045
32#define USB8897_PID_2 0x2046 32#define USB8897_PID_2 0x2046
33#define USB8801_PID_1 0x2049
34#define USB8801_PID_2 0x204a
35
33 36
34#define USB8XXX_FW_DNLD 1 37#define USB8XXX_FW_DNLD 1
35#define USB8XXX_FW_READY 2 38#define USB8XXX_FW_READY 2
@@ -41,6 +44,7 @@
41 44
42#define USB8766_DEFAULT_FW_NAME "mrvl/usb8766_uapsta.bin" 45#define USB8766_DEFAULT_FW_NAME "mrvl/usb8766_uapsta.bin"
43#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin" 46#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin"
47#define USB8801_DEFAULT_FW_NAME "mrvl/usb8801_uapsta.bin"
44#define USB8897_DEFAULT_FW_NAME "mrvl/usb8897_uapsta.bin" 48#define USB8897_DEFAULT_FW_NAME "mrvl/usb8897_uapsta.bin"
45 49
46#define FW_DNLD_TX_BUF_SIZE 620 50#define FW_DNLD_TX_BUF_SIZE 620
@@ -96,11 +100,4 @@ struct fw_data {
96 u8 data[1]; 100 u8 data[1];
97}; 101};
98 102
99/* This function is called after the card has woken up. */
100static inline int
101mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
102{
103 return 0;
104}
105
106#endif /*_MWIFIEX_USB_H */ 103#endif /*_MWIFIEX_USB_H */
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index b1768fbf98f2..308550611f22 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -25,6 +25,96 @@
25#include "wmm.h" 25#include "wmm.h"
26#include "11n.h" 26#include "11n.h"
27 27
28static struct mwifiex_debug_data items[] = {
29 {"int_counter", item_size(int_counter),
30 item_addr(int_counter), 1},
31 {"wmm_ac_vo", item_size(packets_out[WMM_AC_VO]),
32 item_addr(packets_out[WMM_AC_VO]), 1},
33 {"wmm_ac_vi", item_size(packets_out[WMM_AC_VI]),
34 item_addr(packets_out[WMM_AC_VI]), 1},
35 {"wmm_ac_be", item_size(packets_out[WMM_AC_BE]),
36 item_addr(packets_out[WMM_AC_BE]), 1},
37 {"wmm_ac_bk", item_size(packets_out[WMM_AC_BK]),
38 item_addr(packets_out[WMM_AC_BK]), 1},
39 {"tx_buf_size", item_size(tx_buf_size),
40 item_addr(tx_buf_size), 1},
41 {"curr_tx_buf_size", item_size(curr_tx_buf_size),
42 item_addr(curr_tx_buf_size), 1},
43 {"ps_mode", item_size(ps_mode),
44 item_addr(ps_mode), 1},
45 {"ps_state", item_size(ps_state),
46 item_addr(ps_state), 1},
47 {"is_deep_sleep", item_size(is_deep_sleep),
48 item_addr(is_deep_sleep), 1},
49 {"wakeup_dev_req", item_size(pm_wakeup_card_req),
50 item_addr(pm_wakeup_card_req), 1},
51 {"wakeup_tries", item_size(pm_wakeup_fw_try),
52 item_addr(pm_wakeup_fw_try), 1},
53 {"hs_configured", item_size(is_hs_configured),
54 item_addr(is_hs_configured), 1},
55 {"hs_activated", item_size(hs_activated),
56 item_addr(hs_activated), 1},
57 {"num_tx_timeout", item_size(num_tx_timeout),
58 item_addr(num_tx_timeout), 1},
59 {"is_cmd_timedout", item_size(is_cmd_timedout),
60 item_addr(is_cmd_timedout), 1},
61 {"timeout_cmd_id", item_size(timeout_cmd_id),
62 item_addr(timeout_cmd_id), 1},
63 {"timeout_cmd_act", item_size(timeout_cmd_act),
64 item_addr(timeout_cmd_act), 1},
65 {"last_cmd_id", item_size(last_cmd_id),
66 item_addr(last_cmd_id), DBG_CMD_NUM},
67 {"last_cmd_act", item_size(last_cmd_act),
68 item_addr(last_cmd_act), DBG_CMD_NUM},
69 {"last_cmd_index", item_size(last_cmd_index),
70 item_addr(last_cmd_index), 1},
71 {"last_cmd_resp_id", item_size(last_cmd_resp_id),
72 item_addr(last_cmd_resp_id), DBG_CMD_NUM},
73 {"last_cmd_resp_index", item_size(last_cmd_resp_index),
74 item_addr(last_cmd_resp_index), 1},
75 {"last_event", item_size(last_event),
76 item_addr(last_event), DBG_CMD_NUM},
77 {"last_event_index", item_size(last_event_index),
78 item_addr(last_event_index), 1},
79 {"num_cmd_h2c_fail", item_size(num_cmd_host_to_card_failure),
80 item_addr(num_cmd_host_to_card_failure), 1},
81 {"num_cmd_sleep_cfm_fail",
82 item_size(num_cmd_sleep_cfm_host_to_card_failure),
83 item_addr(num_cmd_sleep_cfm_host_to_card_failure), 1},
84 {"num_tx_h2c_fail", item_size(num_tx_host_to_card_failure),
85 item_addr(num_tx_host_to_card_failure), 1},
86 {"num_evt_deauth", item_size(num_event_deauth),
87 item_addr(num_event_deauth), 1},
88 {"num_evt_disassoc", item_size(num_event_disassoc),
89 item_addr(num_event_disassoc), 1},
90 {"num_evt_link_lost", item_size(num_event_link_lost),
91 item_addr(num_event_link_lost), 1},
92 {"num_cmd_deauth", item_size(num_cmd_deauth),
93 item_addr(num_cmd_deauth), 1},
94 {"num_cmd_assoc_ok", item_size(num_cmd_assoc_success),
95 item_addr(num_cmd_assoc_success), 1},
96 {"num_cmd_assoc_fail", item_size(num_cmd_assoc_failure),
97 item_addr(num_cmd_assoc_failure), 1},
98 {"cmd_sent", item_size(cmd_sent),
99 item_addr(cmd_sent), 1},
100 {"data_sent", item_size(data_sent),
101 item_addr(data_sent), 1},
102 {"cmd_resp_received", item_size(cmd_resp_received),
103 item_addr(cmd_resp_received), 1},
104 {"event_received", item_size(event_received),
105 item_addr(event_received), 1},
106
107 /* variables defined in struct mwifiex_adapter */
108 {"cmd_pending", adapter_item_size(cmd_pending),
109 adapter_item_addr(cmd_pending), 1},
110 {"tx_pending", adapter_item_size(tx_pending),
111 adapter_item_addr(tx_pending), 1},
112 {"rx_pending", adapter_item_size(rx_pending),
113 adapter_item_addr(rx_pending), 1},
114};
115
116static int num_of_items = ARRAY_SIZE(items);
117
28/* 118/*
29 * Firmware initialization complete callback handler. 119 * Firmware initialization complete callback handler.
30 * 120 *
@@ -97,6 +187,8 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
97 info->rx_tbl); 187 info->rx_tbl);
98 info->tx_tbl_num = mwifiex_get_tx_ba_stream_tbl(priv, 188 info->tx_tbl_num = mwifiex_get_tx_ba_stream_tbl(priv,
99 info->tx_tbl); 189 info->tx_tbl);
190 info->tdls_peer_num = mwifiex_get_tdls_list(priv,
191 info->tdls_list);
100 info->ps_mode = adapter->ps_mode; 192 info->ps_mode = adapter->ps_mode;
101 info->ps_state = adapter->ps_state; 193 info->ps_state = adapter->ps_state;
102 info->is_deep_sleep = adapter->is_deep_sleep; 194 info->is_deep_sleep = adapter->is_deep_sleep;
@@ -141,6 +233,93 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
141 return 0; 233 return 0;
142} 234}
143 235
236int mwifiex_debug_info_to_buffer(struct mwifiex_private *priv, char *buf,
237 struct mwifiex_debug_info *info)
238{
239 char *p = buf;
240 struct mwifiex_debug_data *d = &items[0];
241 size_t size, addr;
242 long val;
243 int i, j;
244
245 if (!info)
246 return 0;
247
248 for (i = 0; i < num_of_items; i++) {
249 p += sprintf(p, "%s=", d[i].name);
250
251 size = d[i].size / d[i].num;
252
253 if (i < (num_of_items - 3))
254 addr = d[i].addr + (size_t)info;
255 else /* The last 3 items are struct mwifiex_adapter variables */
256 addr = d[i].addr + (size_t)priv->adapter;
257
258 for (j = 0; j < d[i].num; j++) {
259 switch (size) {
260 case 1:
261 val = *((u8 *)addr);
262 break;
263 case 2:
264 val = *((u16 *)addr);
265 break;
266 case 4:
267 val = *((u32 *)addr);
268 break;
269 case 8:
270 val = *((long long *)addr);
271 break;
272 default:
273 val = -1;
274 break;
275 }
276
277 p += sprintf(p, "%#lx ", val);
278 addr += size;
279 }
280
281 p += sprintf(p, "\n");
282 }
283
284 if (info->tx_tbl_num) {
285 p += sprintf(p, "Tx BA stream table:\n");
286 for (i = 0; i < info->tx_tbl_num; i++)
287 p += sprintf(p, "tid = %d, ra = %pM\n",
288 info->tx_tbl[i].tid, info->tx_tbl[i].ra);
289 }
290
291 if (info->rx_tbl_num) {
292 p += sprintf(p, "Rx reorder table:\n");
293 for (i = 0; i < info->rx_tbl_num; i++) {
294 p += sprintf(p, "tid = %d, ta = %pM, ",
295 info->rx_tbl[i].tid,
296 info->rx_tbl[i].ta);
297 p += sprintf(p, "start_win = %d, ",
298 info->rx_tbl[i].start_win);
299 p += sprintf(p, "win_size = %d, buffer: ",
300 info->rx_tbl[i].win_size);
301
302 for (j = 0; j < info->rx_tbl[i].win_size; j++)
303 p += sprintf(p, "%c ",
304 info->rx_tbl[i].buffer[j] ?
305 '1' : '0');
306
307 p += sprintf(p, "\n");
308 }
309 }
310
311 if (info->tdls_peer_num) {
312 p += sprintf(p, "TDLS peer table:\n");
313 for (i = 0; i < info->tdls_peer_num; i++) {
314 p += sprintf(p, "peer = %pM",
315 info->tdls_list[i].peer_addr);
316 p += sprintf(p, "\n");
317 }
318 }
319
320 return p - buf;
321}
322
144static int 323static int
145mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len, 324mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
146 struct rxpd *rx_pd) 325 struct rxpd *rx_pd)
@@ -208,7 +387,7 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
208 pkt_len -= ETH_ALEN + sizeof(pkt_len); 387 pkt_len -= ETH_ALEN + sizeof(pkt_len);
209 rx_pd->rx_pkt_length = cpu_to_le16(pkt_len); 388 rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
210 389
211 cfg80211_rx_mgmt(priv->wdev, priv->roc_cfg.chan.center_freq, 390 cfg80211_rx_mgmt(&priv->wdev, priv->roc_cfg.chan.center_freq,
212 CAL_RSSI(rx_pd->snr, rx_pd->nf), skb->data, pkt_len, 391 CAL_RSSI(rx_pd->snr, rx_pd->nf), skb->data, pkt_len,
213 0); 392 0);
214 393
@@ -404,3 +583,44 @@ void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
404 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); 583 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
405 return; 584 return;
406} 585}
586
587/* This function adds histogram data to histogram array*/
588void mwifiex_hist_data_add(struct mwifiex_private *priv,
589 u8 rx_rate, s8 snr, s8 nflr)
590{
591 struct mwifiex_histogram_data *phist_data = priv->hist_data;
592
593 if (atomic_read(&phist_data->num_samples) > MWIFIEX_HIST_MAX_SAMPLES)
594 mwifiex_hist_data_reset(priv);
595 mwifiex_hist_data_set(priv, rx_rate, snr, nflr);
596}
597
598/* function to add histogram record */
599void mwifiex_hist_data_set(struct mwifiex_private *priv, u8 rx_rate, s8 snr,
600 s8 nflr)
601{
602 struct mwifiex_histogram_data *phist_data = priv->hist_data;
603
604 atomic_inc(&phist_data->num_samples);
605 atomic_inc(&phist_data->rx_rate[rx_rate]);
606 atomic_inc(&phist_data->snr[snr]);
607 atomic_inc(&phist_data->noise_flr[128 + nflr]);
608 atomic_inc(&phist_data->sig_str[nflr - snr]);
609}
610
611/* function to reset histogram data during init/reset */
612void mwifiex_hist_data_reset(struct mwifiex_private *priv)
613{
614 int ix;
615 struct mwifiex_histogram_data *phist_data = priv->hist_data;
616
617 atomic_set(&phist_data->num_samples, 0);
618 for (ix = 0; ix < MWIFIEX_MAX_AC_RX_RATES; ix++)
619 atomic_set(&phist_data->rx_rate[ix], 0);
620 for (ix = 0; ix < MWIFIEX_MAX_SNR; ix++)
621 atomic_set(&phist_data->snr[ix], 0);
622 for (ix = 0; ix < MWIFIEX_MAX_NOISE_FLR; ix++)
623 atomic_set(&phist_data->noise_flr[ix], 0);
624 for (ix = 0; ix < MWIFIEX_MAX_SIG_STRENGTH; ix++)
625 atomic_set(&phist_data->sig_str[ix], 0);
626}
diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h
index 40296cb4a3f1..b541d66c01eb 100644
--- a/drivers/net/wireless/mwifiex/util.h
+++ b/drivers/net/wireless/mwifiex/util.h
@@ -20,6 +20,8 @@
20#ifndef _MWIFIEX_UTIL_H_ 20#ifndef _MWIFIEX_UTIL_H_
21#define _MWIFIEX_UTIL_H_ 21#define _MWIFIEX_UTIL_H_
22 22
23struct mwifiex_private;
24
23struct mwifiex_dma_mapping { 25struct mwifiex_dma_mapping {
24 dma_addr_t addr; 26 dma_addr_t addr;
25 size_t len; 27 size_t len;
@@ -33,6 +35,21 @@ struct mwifiex_cb {
33 }; 35 };
34}; 36};
35 37
38/* size/addr for mwifiex_debug_info */
39#define item_size(n) (FIELD_SIZEOF(struct mwifiex_debug_info, n))
40#define item_addr(n) (offsetof(struct mwifiex_debug_info, n))
41
42/* size/addr for struct mwifiex_adapter */
43#define adapter_item_size(n) (FIELD_SIZEOF(struct mwifiex_adapter, n))
44#define adapter_item_addr(n) (offsetof(struct mwifiex_adapter, n))
45
46struct mwifiex_debug_data {
47 char name[32]; /* variable/array name */
48 u32 size; /* size of the variable/array */
49 size_t addr; /* address of the variable/array */
50 int num; /* number of variables in an array */
51};
52
36static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb) 53static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb)
37{ 54{
38 struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb; 55 struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
@@ -73,4 +90,7 @@ static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
73 return mapping.addr; 90 return mapping.addr;
74} 91}
75 92
93int mwifiex_debug_info_to_buffer(struct mwifiex_private *priv, char *buf,
94 struct mwifiex_debug_info *info);
95
76#endif /* !_MWIFIEX_UTIL_H_ */ 96#endif /* !_MWIFIEX_UTIL_H_ */
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index ffffd2c5a76e..ef717acec8b7 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -1228,6 +1228,9 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
1228 case -EINPROGRESS: 1228 case -EINPROGRESS:
1229 if (adapter->iface_type != MWIFIEX_PCIE) 1229 if (adapter->iface_type != MWIFIEX_PCIE)
1230 adapter->data_sent = false; 1230 adapter->data_sent = false;
1231 break;
1232 case 0:
1233 mwifiex_write_data_complete(adapter, skb, 0, ret);
1231 default: 1234 default:
1232 break; 1235 break;
1233 } 1236 }
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index b8d1e04aa9b9..f9b1218c761a 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -3098,14 +3098,14 @@ static void mwl8k_update_survey(struct mwl8k_priv *priv,
3098 3098
3099 cca_cnt = ioread32(priv->regs + NOK_CCA_CNT_REG); 3099 cca_cnt = ioread32(priv->regs + NOK_CCA_CNT_REG);
3100 cca_cnt /= 1000; /* uSecs to mSecs */ 3100 cca_cnt /= 1000; /* uSecs to mSecs */
3101 survey->channel_time_busy = (u64) cca_cnt; 3101 survey->time_busy = (u64) cca_cnt;
3102 3102
3103 rx_rdy = ioread32(priv->regs + BBU_RXRDY_CNT_REG); 3103 rx_rdy = ioread32(priv->regs + BBU_RXRDY_CNT_REG);
3104 rx_rdy /= 1000; /* uSecs to mSecs */ 3104 rx_rdy /= 1000; /* uSecs to mSecs */
3105 survey->channel_time_rx = (u64) rx_rdy; 3105 survey->time_rx = (u64) rx_rdy;
3106 3106
3107 priv->channel_time = jiffies - priv->channel_time; 3107 priv->channel_time = jiffies - priv->channel_time;
3108 survey->channel_time = jiffies_to_msecs(priv->channel_time); 3108 survey->time = jiffies_to_msecs(priv->channel_time);
3109 3109
3110 survey->channel = channel; 3110 survey->channel = channel;
3111 3111
@@ -3115,9 +3115,9 @@ static void mwl8k_update_survey(struct mwl8k_priv *priv,
3115 survey->noise = nf * -1; 3115 survey->noise = nf * -1;
3116 3116
3117 survey->filled = SURVEY_INFO_NOISE_DBM | 3117 survey->filled = SURVEY_INFO_NOISE_DBM |
3118 SURVEY_INFO_CHANNEL_TIME | 3118 SURVEY_INFO_TIME |
3119 SURVEY_INFO_CHANNEL_TIME_BUSY | 3119 SURVEY_INFO_TIME_BUSY |
3120 SURVEY_INFO_CHANNEL_TIME_RX; 3120 SURVEY_INFO_TIME_RX;
3121} 3121}
3122 3122
3123/* 3123/*
diff --git a/drivers/net/wireless/orinoco/Kconfig b/drivers/net/wireless/orinoco/Kconfig
index 60698b020851..6d831d4d1b5f 100644
--- a/drivers/net/wireless/orinoco/Kconfig
+++ b/drivers/net/wireless/orinoco/Kconfig
@@ -1,7 +1,8 @@
1config HERMES 1config HERMES
2 tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)" 2 tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
3 depends on (PPC_PMAC || PCI || PCMCIA) 3 depends on (PPC_PMAC || PCI || PCMCIA)
4 depends on CFG80211 && CFG80211_WEXT 4 depends on CFG80211
5 select CFG80211_WEXT
5 select WIRELESS_EXT 6 select WIRELESS_EXT
6 select WEXT_SPY 7 select WEXT_SPY
7 select WEXT_PRIV 8 select WEXT_PRIV
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 38ec8d19ac29..c410180479e6 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -2342,7 +2342,7 @@ void free_orinocodev(struct orinoco_private *priv)
2342 list_for_each_entry_safe(sd, sdtemp, &priv->scan_list, list) { 2342 list_for_each_entry_safe(sd, sdtemp, &priv->scan_list, list) {
2343 list_del(&sd->list); 2343 list_del(&sd->list);
2344 2344
2345 if ((sd->len > 0) && sd->buf) 2345 if (sd->len > 0)
2346 kfree(sd->buf); 2346 kfree(sd->buf);
2347 kfree(sd); 2347 kfree(sd);
2348 } 2348 }
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index b6bdad632842..74219d59d7e1 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -94,7 +94,7 @@ static int orinoco_pci_cor_reset(struct orinoco_private *priv)
94 mdelay(HERMES_PCI_COR_OFFT); 94 mdelay(HERMES_PCI_COR_OFFT);
95 95
96 /* The card is ready when it's no longer busy */ 96 /* The card is ready when it's no longer busy */
97 timeout = jiffies + (HERMES_PCI_COR_BUSYT * HZ / 1000); 97 timeout = jiffies + msecs_to_jiffies(HERMES_PCI_COR_BUSYT);
98 reg = hermes_read_regn(hw, CMD); 98 reg = hermes_read_regn(hw, CMD);
99 while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) { 99 while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) {
100 mdelay(1); 100 mdelay(1);
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index b8f6e5c431ae..8b045236b6e0 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -121,7 +121,7 @@ static int orinoco_plx_cor_reset(struct orinoco_private *priv)
121 mdelay(1); 121 mdelay(1);
122 122
123 /* Just in case, wait more until the card is no longer busy */ 123 /* Just in case, wait more until the card is no longer busy */
124 timeout = jiffies + (PLX_RESET_TIME * HZ / 1000); 124 timeout = jiffies + msecs_to_jiffies(PLX_RESET_TIME);
125 reg = hermes_read_regn(hw, CMD); 125 reg = hermes_read_regn(hw, CMD);
126 while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) { 126 while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) {
127 mdelay(1); 127 mdelay(1);
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 79d0e33b625e..20ce569b8a43 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -71,7 +71,7 @@ static int orinoco_tmd_cor_reset(struct orinoco_private *priv)
71 mdelay(1); 71 mdelay(1);
72 72
73 /* Just in case, wait more until the card is no longer busy */ 73 /* Just in case, wait more until the card is no longer busy */
74 timeout = jiffies + (TMD_RESET_TIME * HZ / 1000); 74 timeout = jiffies + msecs_to_jiffies(TMD_RESET_TIME);
75 reg = hermes_read_regn(hw, CMD); 75 reg = hermes_read_regn(hw, CMD);
76 while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) { 76 while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) {
77 mdelay(1); 77 mdelay(1);
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 995846422dc0..91f05442de28 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -364,9 +364,7 @@ static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
364 atomic_set(&ctx->refcount, 1); 364 atomic_set(&ctx->refcount, 1);
365 init_completion(&ctx->done); 365 init_completion(&ctx->done);
366 366
367 init_timer(&ctx->timer); 367 setup_timer(&ctx->timer, ezusb_request_timerfn, (u_long)ctx);
368 ctx->timer.function = ezusb_request_timerfn;
369 ctx->timer.data = (u_long) ctx;
370 return ctx; 368 return ctx;
371} 369}
372 370
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 0fe67d2da208..2fe713eda7ad 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -196,9 +196,9 @@ static int p54_generate_band(struct ieee80211_hw *dev,
196 dest->max_power = chan->max_power; 196 dest->max_power = chan->max_power;
197 priv->survey[*chan_num].channel = &tmp->channels[j]; 197 priv->survey[*chan_num].channel = &tmp->channels[j];
198 priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM | 198 priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM |
199 SURVEY_INFO_CHANNEL_TIME | 199 SURVEY_INFO_TIME |
200 SURVEY_INFO_CHANNEL_TIME_BUSY | 200 SURVEY_INFO_TIME_BUSY |
201 SURVEY_INFO_CHANNEL_TIME_TX; 201 SURVEY_INFO_TIME_TX;
202 dest->hw_value = (*chan_num); 202 dest->hw_value = (*chan_num);
203 j++; 203 j++;
204 (*chan_num)++; 204 (*chan_num)++;
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index bc065e8e348b..5367d510b22d 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -220,6 +220,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
220 struct sk_buff *skb; 220 struct sk_buff *skb;
221 size_t eeprom_hdr_size; 221 size_t eeprom_hdr_size;
222 int ret = 0; 222 int ret = 0;
223 long timeout;
223 224
224 if (priv->fw_var >= 0x509) 225 if (priv->fw_var >= 0x509)
225 eeprom_hdr_size = sizeof(*eeprom_hdr); 226 eeprom_hdr_size = sizeof(*eeprom_hdr);
@@ -249,9 +250,11 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
249 250
250 p54_tx(priv, skb); 251 p54_tx(priv, skb);
251 252
252 if (!wait_for_completion_interruptible_timeout( 253 timeout = wait_for_completion_interruptible_timeout(
253 &priv->eeprom_comp, HZ)) { 254 &priv->eeprom_comp, HZ);
254 wiphy_err(priv->hw->wiphy, "device does not respond!\n"); 255 if (timeout <= 0) {
256 wiphy_err(priv->hw->wiphy,
257 "device does not respond or signal received!\n");
255 ret = -EBUSY; 258 ret = -EBUSY;
256 } 259 }
257 priv->eeprom = NULL; 260 priv->eeprom = NULL;
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 97aeff0edb84..b9250d75d253 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -305,9 +305,9 @@ static void p54_reset_stats(struct p54_common *priv)
305 struct survey_info *info = &priv->survey[chan->hw_value]; 305 struct survey_info *info = &priv->survey[chan->hw_value];
306 306
307 /* only reset channel statistics, don't touch .filled, etc. */ 307 /* only reset channel statistics, don't touch .filled, etc. */
308 info->channel_time = 0; 308 info->time = 0;
309 info->channel_time_busy = 0; 309 info->time_busy = 0;
310 info->channel_time_tx = 0; 310 info->time_tx = 0;
311 } 311 }
312 312
313 priv->update_stats = true; 313 priv->update_stats = true;
@@ -575,6 +575,8 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
575 key->hw_key_idx = 0xff; 575 key->hw_key_idx = 0xff;
576 goto out_unlock; 576 goto out_unlock;
577 } 577 }
578
579 key->flags |= IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
578 } else { 580 } else {
579 slot = key->hw_key_idx; 581 slot = key->hw_key_idx;
580 582
@@ -634,7 +636,7 @@ static int p54_get_survey(struct ieee80211_hw *dev, int idx,
634 636
635 if (in_use) { 637 if (in_use) {
636 /* test if the reported statistics are valid. */ 638 /* test if the reported statistics are valid. */
637 if (survey->channel_time != 0) { 639 if (survey->time != 0) {
638 survey->filled |= SURVEY_INFO_IN_USE; 640 survey->filled |= SURVEY_INFO_IN_USE;
639 } else { 641 } else {
640 /* 642 /*
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index d4aee64fb5ea..27a49068d32d 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -431,6 +431,7 @@ static int p54p_open(struct ieee80211_hw *dev)
431{ 431{
432 struct p54p_priv *priv = dev->priv; 432 struct p54p_priv *priv = dev->priv;
433 int err; 433 int err;
434 long timeout;
434 435
435 init_completion(&priv->boot_comp); 436 init_completion(&priv->boot_comp);
436 err = request_irq(priv->pdev->irq, p54p_interrupt, 437 err = request_irq(priv->pdev->irq, p54p_interrupt,
@@ -468,10 +469,12 @@ static int p54p_open(struct ieee80211_hw *dev)
468 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); 469 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
469 P54P_READ(dev_int); 470 P54P_READ(dev_int);
470 471
471 if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) { 472 timeout = wait_for_completion_interruptible_timeout(
473 &priv->boot_comp, HZ);
474 if (timeout <= 0) {
472 wiphy_err(dev->wiphy, "Cannot boot firmware!\n"); 475 wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
473 p54p_stop(dev); 476 p54p_stop(dev);
474 return -ETIMEDOUT; 477 return timeout ? -ERESTARTSYS : -ETIMEDOUT;
475 } 478 }
476 479
477 P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)); 480 P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 153c61539ec8..24e5ff9a9272 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -587,13 +587,13 @@ static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
587 if (chan) { 587 if (chan) {
588 struct survey_info *survey = &priv->survey[chan->hw_value]; 588 struct survey_info *survey = &priv->survey[chan->hw_value];
589 survey->noise = clamp(priv->noise, -128, 127); 589 survey->noise = clamp(priv->noise, -128, 127);
590 survey->channel_time = priv->survey_raw.active; 590 survey->time = priv->survey_raw.active;
591 survey->channel_time_tx = priv->survey_raw.tx; 591 survey->time_tx = priv->survey_raw.tx;
592 survey->channel_time_busy = priv->survey_raw.tx + 592 survey->time_busy = priv->survey_raw.tx +
593 priv->survey_raw.cca; 593 priv->survey_raw.cca;
594 do_div(survey->channel_time, 1024); 594 do_div(survey->time, 1024);
595 do_div(survey->channel_time_tx, 1024); 595 do_div(survey->time_tx, 1024);
596 do_div(survey->channel_time_busy, 1024); 596 do_div(survey->time_busy, 1024);
597 } 597 }
598 598
599 tmp = p54_find_and_unlink_skb(priv, hdr->req_id); 599 tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 1a4facd1fbf3..60d44ce9c017 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2478,7 +2478,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
2478 ret = rndis_query_oid(usbdev, RNDIS_OID_GEN_LINK_SPEED, &linkspeed, &len); 2478 ret = rndis_query_oid(usbdev, RNDIS_OID_GEN_LINK_SPEED, &linkspeed, &len);
2479 if (ret == 0) { 2479 if (ret == 0) {
2480 sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000; 2480 sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000;
2481 sinfo->filled |= STATION_INFO_TX_BITRATE; 2481 sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
2482 } 2482 }
2483 2483
2484 len = sizeof(rssi); 2484 len = sizeof(rssi);
@@ -2486,7 +2486,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
2486 &rssi, &len); 2486 &rssi, &len);
2487 if (ret == 0) { 2487 if (ret == 0) {
2488 sinfo->signal = level_to_qual(le32_to_cpu(rssi)); 2488 sinfo->signal = level_to_qual(le32_to_cpu(rssi));
2489 sinfo->filled |= STATION_INFO_SIGNAL; 2489 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
2490 } 2490 }
2491} 2491}
2492 2492
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index 4834a9abc171..b6cc9ff47fc2 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -172,7 +172,6 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
172 (struct rsi_91x_sdiodev *)adapter->rsi_dev; 172 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
173 u32 len; 173 u32 len;
174 u32 num_blocks; 174 u32 num_blocks;
175 const u8 *fw;
176 const struct firmware *fw_entry = NULL; 175 const struct firmware *fw_entry = NULL;
177 u32 block_size = dev->tx_blk_size; 176 u32 block_size = dev->tx_blk_size;
178 int status = 0; 177 int status = 0;
@@ -201,7 +200,6 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
201 return status; 200 return status;
202 } 201 }
203 202
204 fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
205 len = fw_entry->size; 203 len = fw_entry->size;
206 204
207 if (len % 4) 205 if (len % 4)
@@ -212,7 +210,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
212 rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len); 210 rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
213 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); 211 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
214 212
215 status = rsi_copy_to_card(common, fw, len, num_blocks); 213 status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
216 release_firmware(fw_entry); 214 release_firmware(fw_entry);
217 return status; 215 return status;
218} 216}
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 81ee481487cf..be2d54f257b1 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -8020,13 +8020,13 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
8020 rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &busy_ext); 8020 rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &busy_ext);
8021 8021
8022 if (idle || busy) { 8022 if (idle || busy) {
8023 survey->filled = SURVEY_INFO_CHANNEL_TIME | 8023 survey->filled = SURVEY_INFO_TIME |
8024 SURVEY_INFO_CHANNEL_TIME_BUSY | 8024 SURVEY_INFO_TIME_BUSY |
8025 SURVEY_INFO_CHANNEL_TIME_EXT_BUSY; 8025 SURVEY_INFO_TIME_EXT_BUSY;
8026 8026
8027 survey->channel_time = (idle + busy) / 1000; 8027 survey->time = (idle + busy) / 1000;
8028 survey->channel_time_busy = busy / 1000; 8028 survey->time_busy = busy / 1000;
8029 survey->channel_time_ext_busy = busy_ext / 1000; 8029 survey->time_ext_busy = busy_ext / 1000;
8030 } 8030 }
8031 8031
8032 if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) 8032 if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 1122dc44c9fd..48a2cad29477 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -240,7 +240,7 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
240 rt2x00dev->rf_channel = libconf.rf.channel; 240 rt2x00dev->rf_channel = libconf.rf.channel;
241 } 241 }
242 242
243 if (test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) && 243 if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_PS_AUTOWAKE) &&
244 (ieee80211_flags & IEEE80211_CONF_CHANGE_PS)) 244 (ieee80211_flags & IEEE80211_CONF_CHANGE_PS))
245 cancel_delayed_work_sync(&rt2x00dev->autowakeup_work); 245 cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
246 246
@@ -257,7 +257,7 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
257 rt2x00link_reset_tuner(rt2x00dev, false); 257 rt2x00link_reset_tuner(rt2x00dev, false);
258 258
259 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && 259 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
260 test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) && 260 rt2x00_has_cap_flag(rt2x00dev, REQUIRE_PS_AUTOWAKE) &&
261 (ieee80211_flags & IEEE80211_CONF_CHANGE_PS) && 261 (ieee80211_flags & IEEE80211_CONF_CHANGE_PS) &&
262 (conf->flags & IEEE80211_CONF_PS)) { 262 (conf->flags & IEEE80211_CONF_PS)) {
263 beacon_diff = (long)jiffies - (long)rt2x00dev->last_beacon; 263 beacon_diff = (long)jiffies - (long)rt2x00dev->last_beacon;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9967a1d9f0ec..5639ed816813 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -351,7 +351,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
351 /* 351 /*
352 * Remove L2 padding which was added during 352 * Remove L2 padding which was added during
353 */ 353 */
354 if (test_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags)) 354 if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD))
355 rt2x00queue_remove_l2pad(entry->skb, header_length); 355 rt2x00queue_remove_l2pad(entry->skb, header_length);
356 356
357 /* 357 /*
@@ -460,7 +460,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
460 * send the status report back. 460 * send the status report back.
461 */ 461 */
462 if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) { 462 if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
463 if (test_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags)) 463 if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TASKLET_CONTEXT))
464 ieee80211_tx_status(rt2x00dev->hw, entry->skb); 464 ieee80211_tx_status(rt2x00dev->hw, entry->skb);
465 else 465 else
466 ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb); 466 ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
@@ -1056,9 +1056,9 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
1056 /* 1056 /*
1057 * Take TX headroom required for alignment into account. 1057 * Take TX headroom required for alignment into account.
1058 */ 1058 */
1059 if (test_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags)) 1059 if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD))
1060 rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE; 1060 rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE;
1061 else if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) 1061 else if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA))
1062 rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE; 1062 rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE;
1063 1063
1064 /* 1064 /*
@@ -1069,7 +1069,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
1069 /* 1069 /*
1070 * Allocate tx status FIFO for driver use. 1070 * Allocate tx status FIFO for driver use.
1071 */ 1071 */
1072 if (test_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags)) { 1072 if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TXSTATUS_FIFO)) {
1073 /* 1073 /*
1074 * Allocate the txstatus fifo. In the worst case the tx 1074 * Allocate the txstatus fifo. In the worst case the tx
1075 * status fifo has to hold the tx status of all entries 1075 * status fifo has to hold the tx status of all entries
@@ -1131,7 +1131,7 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
1131 /* 1131 /*
1132 * Stop rfkill polling. 1132 * Stop rfkill polling.
1133 */ 1133 */
1134 if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) 1134 if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL))
1135 rt2x00rfkill_unregister(rt2x00dev); 1135 rt2x00rfkill_unregister(rt2x00dev);
1136 1136
1137 /* 1137 /*
@@ -1173,7 +1173,7 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
1173 /* 1173 /*
1174 * Start rfkill polling. 1174 * Start rfkill polling.
1175 */ 1175 */
1176 if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) 1176 if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL))
1177 rt2x00rfkill_register(rt2x00dev); 1177 rt2x00rfkill_register(rt2x00dev);
1178 1178
1179 return 0; 1179 return 0;
@@ -1389,7 +1389,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1389 /* 1389 /*
1390 * Start rfkill polling. 1390 * Start rfkill polling.
1391 */ 1391 */
1392 if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) 1392 if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL))
1393 rt2x00rfkill_register(rt2x00dev); 1393 rt2x00rfkill_register(rt2x00dev);
1394 1394
1395 return 0; 1395 return 0;
@@ -1408,7 +1408,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1408 /* 1408 /*
1409 * Stop rfkill polling. 1409 * Stop rfkill polling.
1410 */ 1410 */
1411 if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) 1411 if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL))
1412 rt2x00rfkill_unregister(rt2x00dev); 1412 rt2x00rfkill_unregister(rt2x00dev);
1413 1413
1414 /* 1414 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index fbae2799e3ee..5813300f68a2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -96,7 +96,7 @@ int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev)
96{ 96{
97 int retval; 97 int retval;
98 98
99 if (!test_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags)) 99 if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_FIRMWARE))
100 return 0; 100 return 0;
101 101
102 if (!rt2x00dev->fw) { 102 if (!rt2x00dev->fw) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index cb40245a0695..300876df056f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -119,7 +119,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
119 * Use the ATIM queue if appropriate and present. 119 * Use the ATIM queue if appropriate and present.
120 */ 120 */
121 if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && 121 if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
122 test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) 122 rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE))
123 qid = QID_ATIM; 123 qid = QID_ATIM;
124 124
125 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); 125 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 66ff36447b94..68b620b2462f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -85,7 +85,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
85 memset(skbdesc, 0, sizeof(*skbdesc)); 85 memset(skbdesc, 0, sizeof(*skbdesc));
86 skbdesc->entry = entry; 86 skbdesc->entry = entry;
87 87
88 if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) { 88 if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
89 dma_addr_t skb_dma; 89 dma_addr_t skb_dma;
90 90
91 skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len, 91 skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
@@ -198,7 +198,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
198 198
199 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); 199 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
200 200
201 if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) { 201 if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
202 /* 202 /*
203 * rt2800 has a H/W (or F/W) bug, device incorrectly increase 203 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
204 * seqno on retransmited data (non-QOS) frames. To workaround 204 * seqno on retransmited data (non-QOS) frames. To workaround
@@ -484,7 +484,7 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
484 rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc); 484 rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
485 rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc); 485 rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);
486 486
487 if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags)) 487 if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
488 rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc, 488 rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
489 sta, hwrate); 489 sta, hwrate);
490 else 490 else
@@ -526,7 +526,7 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
526 /* 526 /*
527 * Map the skb to DMA. 527 * Map the skb to DMA.
528 */ 528 */
529 if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) && 529 if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
530 rt2x00queue_map_txskb(entry)) 530 rt2x00queue_map_txskb(entry))
531 return -ENOMEM; 531 return -ENOMEM;
532 532
@@ -646,7 +646,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
646 */ 646 */
647 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) && 647 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
648 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) { 648 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
649 if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags)) 649 if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
650 rt2x00crypto_tx_copy_iv(skb, &txdesc); 650 rt2x00crypto_tx_copy_iv(skb, &txdesc);
651 else 651 else
652 rt2x00crypto_tx_remove_iv(skb, &txdesc); 652 rt2x00crypto_tx_remove_iv(skb, &txdesc);
@@ -660,9 +660,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
660 * PCI and USB devices, while header alignment only is valid 660 * PCI and USB devices, while header alignment only is valid
661 * for PCI devices. 661 * for PCI devices.
662 */ 662 */
663 if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags)) 663 if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
664 rt2x00queue_insert_l2pad(skb, txdesc.header_length); 664 rt2x00queue_insert_l2pad(skb, txdesc.header_length);
665 else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags)) 665 else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
666 rt2x00queue_align_frame(skb); 666 rt2x00queue_align_frame(skb);
667 667
668 /* 668 /*
@@ -1178,7 +1178,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
1178 if (status) 1178 if (status)
1179 goto exit; 1179 goto exit;
1180 1180
1181 if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) { 1181 if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
1182 status = rt2x00queue_alloc_entries(rt2x00dev->atim); 1182 status = rt2x00queue_alloc_entries(rt2x00dev->atim);
1183 if (status) 1183 if (status)
1184 goto exit; 1184 goto exit;
@@ -1234,7 +1234,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
1234 struct data_queue *queue; 1234 struct data_queue *queue;
1235 enum data_queue_qid qid; 1235 enum data_queue_qid qid;
1236 unsigned int req_atim = 1236 unsigned int req_atim =
1237 !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags); 1237 rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);
1238 1238
1239 /* 1239 /*
1240 * We need the following queues: 1240 * We need the following queues:
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 892270dd3e7b..7627af6098eb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -274,7 +274,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
274 * Schedule the delayed work for reading the TX status 274 * Schedule the delayed work for reading the TX status
275 * from the device. 275 * from the device.
276 */ 276 */
277 if (!test_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags) || 277 if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TXSTATUS_FIFO) ||
278 !kfifo_is_empty(&rt2x00dev->txstatus_fifo)) 278 !kfifo_is_empty(&rt2x00dev->txstatus_fifo))
279 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); 279 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
280} 280}
@@ -456,7 +456,7 @@ static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data)
456 * Kill guardian urb (if required by driver). 456 * Kill guardian urb (if required by driver).
457 */ 457 */
458 if ((entry->queue->qid == QID_BEACON) && 458 if ((entry->queue->qid == QID_BEACON) &&
459 (test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags))) 459 (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD)))
460 usb_kill_urb(bcn_priv->guardian_urb); 460 usb_kill_urb(bcn_priv->guardian_urb);
461 461
462 return false; 462 return false;
@@ -655,7 +655,7 @@ static int rt2x00usb_alloc_entries(struct data_queue *queue)
655 * then we are done. 655 * then we are done.
656 */ 656 */
657 if (queue->qid != QID_BEACON || 657 if (queue->qid != QID_BEACON ||
658 !test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags)) 658 !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD))
659 return 0; 659 return 0;
660 660
661 for (i = 0; i < queue->limit; i++) { 661 for (i = 0; i < queue->limit; i++) {
@@ -690,7 +690,7 @@ static void rt2x00usb_free_entries(struct data_queue *queue)
690 * then we are done. 690 * then we are done.
691 */ 691 */
692 if (queue->qid != QID_BEACON || 692 if (queue->qid != QID_BEACON ||
693 !test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags)) 693 !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD))
694 return; 694 return;
695 695
696 for (i = 0; i < queue->limit; i++) { 696 for (i = 0; i < queue->limit; i++) {
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 40b6d1d006d7..1d4677460711 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -867,63 +867,135 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
867 * 867 *
868 * B/G rate: 868 * B/G rate:
869 * (rx_status->flag & RX_FLAG_HT) = 0, 869 * (rx_status->flag & RX_FLAG_HT) = 0,
870 * DESC92_RATE1M-->DESC92_RATE54M ==> idx is 0-->11, 870 * DESC_RATE1M-->DESC_RATE54M ==> idx is 0-->11,
871 * 871 *
872 * N rate: 872 * N rate:
873 * (rx_status->flag & RX_FLAG_HT) = 1, 873 * (rx_status->flag & RX_FLAG_HT) = 1,
874 * DESC92_RATEMCS0-->DESC92_RATEMCS15 ==> idx is 0-->15 874 * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
875 * 875 *
876 * 5G band:rx_status->band == IEEE80211_BAND_5GHZ 876 * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
877 * A rate: 877 * A rate:
878 * (rx_status->flag & RX_FLAG_HT) = 0, 878 * (rx_status->flag & RX_FLAG_HT) = 0,
879 * DESC92_RATE6M-->DESC92_RATE54M ==> idx is 0-->7, 879 * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
880 * 880 *
881 * N rate: 881 * N rate:
882 * (rx_status->flag & RX_FLAG_HT) = 1, 882 * (rx_status->flag & RX_FLAG_HT) = 1,
883 * DESC92_RATEMCS0-->DESC92_RATEMCS15 ==> idx is 0-->15 883 * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
884 *
885 * VHT rates:
886 * DESC_RATEVHT1SS_MCS0-->DESC_RATEVHT1SS_MCS9 ==> idx is 0-->9
887 * DESC_RATEVHT2SS_MCS0-->DESC_RATEVHT2SS_MCS9 ==> idx is 0-->9
884 */ 888 */
885int rtlwifi_rate_mapping(struct ieee80211_hw *hw, 889int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, bool isvht,
886 bool isht, u8 desc_rate, bool first_ampdu) 890 u8 desc_rate)
887{ 891{
888 int rate_idx; 892 int rate_idx;
889 893
894 if (isvht) {
895 switch (desc_rate) {
896 case DESC_RATEVHT1SS_MCS0:
897 rate_idx = 0;
898 break;
899 case DESC_RATEVHT1SS_MCS1:
900 rate_idx = 1;
901 break;
902 case DESC_RATEVHT1SS_MCS2:
903 rate_idx = 2;
904 break;
905 case DESC_RATEVHT1SS_MCS3:
906 rate_idx = 3;
907 break;
908 case DESC_RATEVHT1SS_MCS4:
909 rate_idx = 4;
910 break;
911 case DESC_RATEVHT1SS_MCS5:
912 rate_idx = 5;
913 break;
914 case DESC_RATEVHT1SS_MCS6:
915 rate_idx = 6;
916 break;
917 case DESC_RATEVHT1SS_MCS7:
918 rate_idx = 7;
919 break;
920 case DESC_RATEVHT1SS_MCS8:
921 rate_idx = 8;
922 break;
923 case DESC_RATEVHT1SS_MCS9:
924 rate_idx = 9;
925 break;
926 case DESC_RATEVHT2SS_MCS0:
927 rate_idx = 0;
928 break;
929 case DESC_RATEVHT2SS_MCS1:
930 rate_idx = 1;
931 break;
932 case DESC_RATEVHT2SS_MCS2:
933 rate_idx = 2;
934 break;
935 case DESC_RATEVHT2SS_MCS3:
936 rate_idx = 3;
937 break;
938 case DESC_RATEVHT2SS_MCS4:
939 rate_idx = 4;
940 break;
941 case DESC_RATEVHT2SS_MCS5:
942 rate_idx = 5;
943 break;
944 case DESC_RATEVHT2SS_MCS6:
945 rate_idx = 6;
946 break;
947 case DESC_RATEVHT2SS_MCS7:
948 rate_idx = 7;
949 break;
950 case DESC_RATEVHT2SS_MCS8:
951 rate_idx = 8;
952 break;
953 case DESC_RATEVHT2SS_MCS9:
954 rate_idx = 9;
955 break;
956 default:
957 rate_idx = 0;
958 break;
959 }
960 return rate_idx;
961 }
890 if (false == isht) { 962 if (false == isht) {
891 if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) { 963 if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
892 switch (desc_rate) { 964 switch (desc_rate) {
893 case DESC92_RATE1M: 965 case DESC_RATE1M:
894 rate_idx = 0; 966 rate_idx = 0;
895 break; 967 break;
896 case DESC92_RATE2M: 968 case DESC_RATE2M:
897 rate_idx = 1; 969 rate_idx = 1;
898 break; 970 break;
899 case DESC92_RATE5_5M: 971 case DESC_RATE5_5M:
900 rate_idx = 2; 972 rate_idx = 2;
901 break; 973 break;
902 case DESC92_RATE11M: 974 case DESC_RATE11M:
903 rate_idx = 3; 975 rate_idx = 3;
904 break; 976 break;
905 case DESC92_RATE6M: 977 case DESC_RATE6M:
906 rate_idx = 4; 978 rate_idx = 4;
907 break; 979 break;
908 case DESC92_RATE9M: 980 case DESC_RATE9M:
909 rate_idx = 5; 981 rate_idx = 5;
910 break; 982 break;
911 case DESC92_RATE12M: 983 case DESC_RATE12M:
912 rate_idx = 6; 984 rate_idx = 6;
913 break; 985 break;
914 case DESC92_RATE18M: 986 case DESC_RATE18M:
915 rate_idx = 7; 987 rate_idx = 7;
916 break; 988 break;
917 case DESC92_RATE24M: 989 case DESC_RATE24M:
918 rate_idx = 8; 990 rate_idx = 8;
919 break; 991 break;
920 case DESC92_RATE36M: 992 case DESC_RATE36M:
921 rate_idx = 9; 993 rate_idx = 9;
922 break; 994 break;
923 case DESC92_RATE48M: 995 case DESC_RATE48M:
924 rate_idx = 10; 996 rate_idx = 10;
925 break; 997 break;
926 case DESC92_RATE54M: 998 case DESC_RATE54M:
927 rate_idx = 11; 999 rate_idx = 11;
928 break; 1000 break;
929 default: 1001 default:
@@ -932,28 +1004,28 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
932 } 1004 }
933 } else { 1005 } else {
934 switch (desc_rate) { 1006 switch (desc_rate) {
935 case DESC92_RATE6M: 1007 case DESC_RATE6M:
936 rate_idx = 0; 1008 rate_idx = 0;
937 break; 1009 break;
938 case DESC92_RATE9M: 1010 case DESC_RATE9M:
939 rate_idx = 1; 1011 rate_idx = 1;
940 break; 1012 break;
941 case DESC92_RATE12M: 1013 case DESC_RATE12M:
942 rate_idx = 2; 1014 rate_idx = 2;
943 break; 1015 break;
944 case DESC92_RATE18M: 1016 case DESC_RATE18M:
945 rate_idx = 3; 1017 rate_idx = 3;
946 break; 1018 break;
947 case DESC92_RATE24M: 1019 case DESC_RATE24M:
948 rate_idx = 4; 1020 rate_idx = 4;
949 break; 1021 break;
950 case DESC92_RATE36M: 1022 case DESC_RATE36M:
951 rate_idx = 5; 1023 rate_idx = 5;
952 break; 1024 break;
953 case DESC92_RATE48M: 1025 case DESC_RATE48M:
954 rate_idx = 6; 1026 rate_idx = 6;
955 break; 1027 break;
956 case DESC92_RATE54M: 1028 case DESC_RATE54M:
957 rate_idx = 7; 1029 rate_idx = 7;
958 break; 1030 break;
959 default: 1031 default:
@@ -963,52 +1035,52 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
963 } 1035 }
964 } else { 1036 } else {
965 switch (desc_rate) { 1037 switch (desc_rate) {
966 case DESC92_RATEMCS0: 1038 case DESC_RATEMCS0:
967 rate_idx = 0; 1039 rate_idx = 0;
968 break; 1040 break;
969 case DESC92_RATEMCS1: 1041 case DESC_RATEMCS1:
970 rate_idx = 1; 1042 rate_idx = 1;
971 break; 1043 break;
972 case DESC92_RATEMCS2: 1044 case DESC_RATEMCS2:
973 rate_idx = 2; 1045 rate_idx = 2;
974 break; 1046 break;
975 case DESC92_RATEMCS3: 1047 case DESC_RATEMCS3:
976 rate_idx = 3; 1048 rate_idx = 3;
977 break; 1049 break;
978 case DESC92_RATEMCS4: 1050 case DESC_RATEMCS4:
979 rate_idx = 4; 1051 rate_idx = 4;
980 break; 1052 break;
981 case DESC92_RATEMCS5: 1053 case DESC_RATEMCS5:
982 rate_idx = 5; 1054 rate_idx = 5;
983 break; 1055 break;
984 case DESC92_RATEMCS6: 1056 case DESC_RATEMCS6:
985 rate_idx = 6; 1057 rate_idx = 6;
986 break; 1058 break;
987 case DESC92_RATEMCS7: 1059 case DESC_RATEMCS7:
988 rate_idx = 7; 1060 rate_idx = 7;
989 break; 1061 break;
990 case DESC92_RATEMCS8: 1062 case DESC_RATEMCS8:
991 rate_idx = 8; 1063 rate_idx = 8;
992 break; 1064 break;
993 case DESC92_RATEMCS9: 1065 case DESC_RATEMCS9:
994 rate_idx = 9; 1066 rate_idx = 9;
995 break; 1067 break;
996 case DESC92_RATEMCS10: 1068 case DESC_RATEMCS10:
997 rate_idx = 10; 1069 rate_idx = 10;
998 break; 1070 break;
999 case DESC92_RATEMCS11: 1071 case DESC_RATEMCS11:
1000 rate_idx = 11; 1072 rate_idx = 11;
1001 break; 1073 break;
1002 case DESC92_RATEMCS12: 1074 case DESC_RATEMCS12:
1003 rate_idx = 12; 1075 rate_idx = 12;
1004 break; 1076 break;
1005 case DESC92_RATEMCS13: 1077 case DESC_RATEMCS13:
1006 rate_idx = 13; 1078 rate_idx = 13;
1007 break; 1079 break;
1008 case DESC92_RATEMCS14: 1080 case DESC_RATEMCS14:
1009 rate_idx = 14; 1081 rate_idx = 14;
1010 break; 1082 break;
1011 case DESC92_RATEMCS15: 1083 case DESC_RATEMCS15:
1012 rate_idx = 15; 1084 rate_idx = 15;
1013 break; 1085 break;
1014 default: 1086 default:
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 982f2450feea..c6cb49c3ee32 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -123,8 +123,8 @@ void rtl_watch_dog_timer_callback(unsigned long data);
123void rtl_deinit_deferred_work(struct ieee80211_hw *hw); 123void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
124 124
125bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); 125bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
126int rtlwifi_rate_mapping(struct ieee80211_hw *hw, 126int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
127 bool isht, u8 desc_rate, bool first_ampdu); 127 bool isvht, u8 desc_rate);
128bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb); 128bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
129u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); 129u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
130 130
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 5fc6f52641bd..a31a12775f1a 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -95,7 +95,8 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
95} 95}
96EXPORT_SYMBOL(rtl_bb_delay); 96EXPORT_SYMBOL(rtl_bb_delay);
97 97
98void rtl_fw_cb(const struct firmware *firmware, void *context) 98static void rtl_fw_do_work(const struct firmware *firmware, void *context,
99 bool is_wow)
99{ 100{
100 struct ieee80211_hw *hw = context; 101 struct ieee80211_hw *hw = context;
101 struct rtl_priv *rtlpriv = rtl_priv(hw); 102 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -125,12 +126,31 @@ found_alt:
125 release_firmware(firmware); 126 release_firmware(firmware);
126 return; 127 return;
127 } 128 }
128 memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size); 129 if (!is_wow) {
130 memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
131 firmware->size);
132 rtlpriv->rtlhal.fwsize = firmware->size;
133 } else {
134 memcpy(rtlpriv->rtlhal.wowlan_firmware, firmware->data,
135 firmware->size);
136 rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
137 }
129 rtlpriv->rtlhal.fwsize = firmware->size; 138 rtlpriv->rtlhal.fwsize = firmware->size;
130 release_firmware(firmware); 139 release_firmware(firmware);
131} 140}
141
142void rtl_fw_cb(const struct firmware *firmware, void *context)
143{
144 rtl_fw_do_work(firmware, context, false);
145}
132EXPORT_SYMBOL(rtl_fw_cb); 146EXPORT_SYMBOL(rtl_fw_cb);
133 147
148void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context)
149{
150 rtl_fw_do_work(firmware, context, true);
151}
152EXPORT_SYMBOL(rtl_wowlan_fw_cb);
153
134/*mutex for start & stop is must here. */ 154/*mutex for start & stop is must here. */
135static int rtl_op_start(struct ieee80211_hw *hw) 155static int rtl_op_start(struct ieee80211_hw *hw)
136{ 156{
@@ -990,6 +1010,16 @@ static int rtl_op_conf_tx(struct ieee80211_hw *hw,
990 return 0; 1010 return 0;
991} 1011}
992 1012
1013static void send_beacon_frame(struct ieee80211_hw *hw,
1014 struct ieee80211_vif *vif)
1015{
1016 struct rtl_priv *rtlpriv = rtl_priv(hw);
1017 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
1018
1019 if (skb)
1020 rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL);
1021}
1022
993static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, 1023static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
994 struct ieee80211_vif *vif, 1024 struct ieee80211_vif *vif,
995 struct ieee80211_bss_conf *bss_conf, 1025 struct ieee80211_bss_conf *bss_conf,
@@ -1020,6 +1050,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
1020 1050
1021 if (rtlpriv->cfg->ops->linked_set_reg) 1051 if (rtlpriv->cfg->ops->linked_set_reg)
1022 rtlpriv->cfg->ops->linked_set_reg(hw); 1052 rtlpriv->cfg->ops->linked_set_reg(hw);
1053 send_beacon_frame(hw, vif);
1023 } 1054 }
1024 } 1055 }
1025 if ((changed & BSS_CHANGED_BEACON_ENABLED && 1056 if ((changed & BSS_CHANGED_BEACON_ENABLED &&
@@ -1851,3 +1882,40 @@ bool rtl_btc_status_false(void)
1851 return false; 1882 return false;
1852} 1883}
1853EXPORT_SYMBOL_GPL(rtl_btc_status_false); 1884EXPORT_SYMBOL_GPL(rtl_btc_status_false);
1885
1886void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igvalue)
1887{
1888 struct rtl_priv *rtlpriv = rtl_priv(hw);
1889 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
1890
1891 dm_digtable->dig_enable_flag = true;
1892 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
1893 dm_digtable->cur_igvalue = cur_igvalue;
1894 dm_digtable->pre_igvalue = 0;
1895 dm_digtable->cur_sta_cstate = DIG_STA_DISCONNECT;
1896 dm_digtable->presta_cstate = DIG_STA_DISCONNECT;
1897 dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
1898 dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
1899 dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
1900 dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
1901 dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
1902 dm_digtable->rx_gain_max = DM_DIG_MAX;
1903 dm_digtable->rx_gain_min = DM_DIG_MIN;
1904 dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
1905 dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
1906 dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
1907 dm_digtable->pre_cck_cca_thres = 0xff;
1908 dm_digtable->cur_cck_cca_thres = 0x83;
1909 dm_digtable->forbidden_igi = DM_DIG_MIN;
1910 dm_digtable->large_fa_hit = 0;
1911 dm_digtable->recover_cnt = 0;
1912 dm_digtable->dig_min_0 = 0x25;
1913 dm_digtable->dig_min_1 = 0x25;
1914 dm_digtable->media_connect_0 = false;
1915 dm_digtable->media_connect_1 = false;
1916 rtlpriv->dm.dm_initialgain_enable = true;
1917 dm_digtable->bt30_cur_igi = 0x32;
1918 dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
1919 dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
1920}
1921EXPORT_SYMBOL(rtl_dm_diginit);
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index 624e1dc16d31..7b64e34f421e 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -35,13 +35,55 @@
35 35
36#define RTL_SUPPORTED_CTRL_FILTER 0xFF 36#define RTL_SUPPORTED_CTRL_FILTER 0xFF
37 37
38#define DM_DIG_THRESH_HIGH 40
39#define DM_DIG_THRESH_LOW 35
40#define DM_FALSEALARM_THRESH_LOW 400
41#define DM_FALSEALARM_THRESH_HIGH 1000
42
43#define DM_DIG_MAX 0x3e
44#define DM_DIG_MIN 0x1e
45#define DM_DIG_MAX_AP 0x32
46#define DM_DIG_BACKOFF_MAX 12
47#define DM_DIG_BACKOFF_MIN -4
48#define DM_DIG_BACKOFF_DEFAULT 10
49
50enum cck_packet_detection_threshold {
51 CCK_PD_STAGE_LOWRSSI = 0,
52 CCK_PD_STAGE_HIGHRSSI = 1,
53 CCK_FA_STAGE_LOW = 2,
54 CCK_FA_STAGE_HIGH = 3,
55 CCK_PD_STAGE_MAX = 4,
56};
57
58enum dm_dig_ext_port_alg_e {
59 DIG_EXT_PORT_STAGE_0 = 0,
60 DIG_EXT_PORT_STAGE_1 = 1,
61 DIG_EXT_PORT_STAGE_2 = 2,
62 DIG_EXT_PORT_STAGE_3 = 3,
63 DIG_EXT_PORT_STAGE_MAX = 4,
64};
65
66enum dm_dig_connect_e {
67 DIG_STA_DISCONNECT,
68 DIG_STA_CONNECT,
69 DIG_STA_BEFORE_CONNECT,
70 DIG_MULTISTA_DISCONNECT,
71 DIG_MULTISTA_CONNECT,
72 DIG_AP_DISCONNECT,
73 DIG_AP_CONNECT,
74 DIG_AP_ADD_STATION,
75 DIG_CONNECT_MAX
76};
77
38extern const struct ieee80211_ops rtl_ops; 78extern const struct ieee80211_ops rtl_ops;
39void rtl_fw_cb(const struct firmware *firmware, void *context); 79void rtl_fw_cb(const struct firmware *firmware, void *context);
80void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context);
40void rtl_addr_delay(u32 addr); 81void rtl_addr_delay(u32 addr);
41void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr, 82void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
42 u32 mask, u32 data); 83 u32 mask, u32 data);
43void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data); 84void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data);
44bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb); 85bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
45bool rtl_btc_status_false(void); 86bool rtl_btc_status_false(void);
87void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igval);
46 88
47#endif 89#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index c70efb9a6e78..ec456f0d972e 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -578,6 +578,13 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
578 else 578 else
579 entry = (u8 *)(&ring->desc[ring->idx]); 579 entry = (u8 *)(&ring->desc[ring->idx]);
580 580
581 if (rtlpriv->cfg->ops->get_available_desc &&
582 rtlpriv->cfg->ops->get_available_desc(hw, prio) <= 1) {
583 RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_DMESG,
584 "no available desc!\n");
585 return;
586 }
587
581 if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx)) 588 if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
582 return; 589 return;
583 ring->idx = (ring->idx + 1) % ring->entries; 590 ring->idx = (ring->idx + 1) % ring->entries;
@@ -641,10 +648,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
641 648
642 ieee80211_tx_status_irqsafe(hw, skb); 649 ieee80211_tx_status_irqsafe(hw, skb);
643 650
644 if ((ring->entries - skb_queue_len(&ring->queue)) 651 if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) {
645 == 2) {
646 652
647 RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, 653 RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
648 "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n", 654 "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
649 prio, ring->idx, 655 prio, ring->idx,
650 skb_queue_len(&ring->queue)); 656 skb_queue_len(&ring->queue));
@@ -793,7 +799,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
793 rx_remained_cnt = 799 rx_remained_cnt =
794 rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw, 800 rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
795 hw_queue); 801 hw_queue);
796 if (rx_remained_cnt < 1) 802 if (rx_remained_cnt == 0)
797 return; 803 return;
798 804
799 } else { /* rx descriptor */ 805 } else { /* rx descriptor */
@@ -848,18 +854,18 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
848 else 854 else
849 skb_reserve(skb, stats.rx_drvinfo_size + 855 skb_reserve(skb, stats.rx_drvinfo_size +
850 stats.rx_bufshift); 856 stats.rx_bufshift);
851
852 } else { 857 } else {
853 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 858 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
854 "skb->end - skb->tail = %d, len is %d\n", 859 "skb->end - skb->tail = %d, len is %d\n",
855 skb->end - skb->tail, len); 860 skb->end - skb->tail, len);
856 break; 861 dev_kfree_skb_any(skb);
862 goto new_trx_end;
857 } 863 }
858 /* handle command packet here */ 864 /* handle command packet here */
859 if (rtlpriv->cfg->ops->rx_command_packet && 865 if (rtlpriv->cfg->ops->rx_command_packet &&
860 rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) { 866 rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) {
861 dev_kfree_skb_any(skb); 867 dev_kfree_skb_any(skb);
862 goto end; 868 goto new_trx_end;
863 } 869 }
864 870
865 /* 871 /*
@@ -909,6 +915,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
909 } else { 915 } else {
910 dev_kfree_skb_any(skb); 916 dev_kfree_skb_any(skb);
911 } 917 }
918new_trx_end:
912 if (rtlpriv->use_new_trx_flow) { 919 if (rtlpriv->use_new_trx_flow) {
913 rtlpci->rx_ring[hw_queue].next_rx_rp += 1; 920 rtlpci->rx_ring[hw_queue].next_rx_rp += 1;
914 rtlpci->rx_ring[hw_queue].next_rx_rp %= 921 rtlpci->rx_ring[hw_queue].next_rx_rp %=
@@ -924,7 +931,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
924 rtlpriv->enter_ps = false; 931 rtlpriv->enter_ps = false;
925 schedule_work(&rtlpriv->works.lps_change_work); 932 schedule_work(&rtlpriv->works.lps_change_work);
926 } 933 }
927end:
928 skb = new_skb; 934 skb = new_skb;
929no_new: 935no_new:
930 if (rtlpriv->use_new_trx_flow) { 936 if (rtlpriv->use_new_trx_flow) {
@@ -1688,6 +1694,15 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
1688 } 1694 }
1689 } 1695 }
1690 1696
1697 if (rtlpriv->cfg->ops->get_available_desc &&
1698 rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) {
1699 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1700 "get_available_desc fail\n");
1701 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
1702 flags);
1703 return skb->len;
1704 }
1705
1691 if (ieee80211_is_data_qos(fc)) { 1706 if (ieee80211_is_data_qos(fc)) {
1692 tid = rtl_get_tid(skb); 1707 tid = rtl_get_tid(skb);
1693 if (sta) { 1708 if (sta) {
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index 5e832306dba9..d4567d12e07e 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -325,4 +325,11 @@ static inline void pci_write32_async(struct rtl_priv *rtlpriv,
325 writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr); 325 writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
326} 326}
327 327
328static inline u16 calc_fifo_space(u16 rp, u16 wp)
329{
330 if (rp <= wp)
331 return RTL_PCI_MAX_RX_COUNT - 1 + rp - wp;
332 return rp - wp - 1;
333}
334
328#endif 335#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
index 2aa34d9055f0..d930c1f78721 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
@@ -26,6 +26,7 @@
26#include "../wifi.h" 26#include "../wifi.h"
27#include "../base.h" 27#include "../base.h"
28#include "../pci.h" 28#include "../pci.h"
29#include "../core.h"
29#include "reg.h" 30#include "reg.h"
30#include "def.h" 31#include "def.h"
31#include "phy.h" 32#include "phy.h"
@@ -341,38 +342,6 @@ static void dm_tx_pwr_track_set_pwr(struct ieee80211_hw *hw,
341 } 342 }
342} 343}
343 344
344static void rtl88e_dm_diginit(struct ieee80211_hw *hw)
345{
346 struct rtl_priv *rtlpriv = rtl_priv(hw);
347 struct dig_t *dm_dig = &rtlpriv->dm_digtable;
348
349 dm_dig->dig_enable_flag = true;
350 dm_dig->cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
351 dm_dig->pre_igvalue = 0;
352 dm_dig->cur_sta_cstate = DIG_STA_DISCONNECT;
353 dm_dig->presta_cstate = DIG_STA_DISCONNECT;
354 dm_dig->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
355 dm_dig->rssi_lowthresh = DM_DIG_THRESH_LOW;
356 dm_dig->rssi_highthresh = DM_DIG_THRESH_HIGH;
357 dm_dig->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
358 dm_dig->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
359 dm_dig->rx_gain_max = DM_DIG_MAX;
360 dm_dig->rx_gain_min = DM_DIG_MIN;
361 dm_dig->back_val = DM_DIG_BACKOFF_DEFAULT;
362 dm_dig->back_range_max = DM_DIG_BACKOFF_MAX;
363 dm_dig->back_range_min = DM_DIG_BACKOFF_MIN;
364 dm_dig->pre_cck_cca_thres = 0xff;
365 dm_dig->cur_cck_cca_thres = 0x83;
366 dm_dig->forbidden_igi = DM_DIG_MIN;
367 dm_dig->large_fa_hit = 0;
368 dm_dig->recover_cnt = 0;
369 dm_dig->dig_min_0 = 0x25;
370 dm_dig->dig_min_1 = 0x25;
371 dm_dig->media_connect_0 = false;
372 dm_dig->media_connect_1 = false;
373 rtlpriv->dm.dm_initialgain_enable = true;
374}
375
376static u8 rtl88e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) 345static u8 rtl88e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
377{ 346{
378 struct rtl_priv *rtlpriv = rtl_priv(hw); 347 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1796,9 +1765,10 @@ static void rtl88e_dm_antenna_diversity(struct ieee80211_hw *hw)
1796void rtl88e_dm_init(struct ieee80211_hw *hw) 1765void rtl88e_dm_init(struct ieee80211_hw *hw)
1797{ 1766{
1798 struct rtl_priv *rtlpriv = rtl_priv(hw); 1767 struct rtl_priv *rtlpriv = rtl_priv(hw);
1768 u32 cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
1799 1769
1800 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; 1770 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
1801 rtl88e_dm_diginit(hw); 1771 rtl_dm_diginit(hw, cur_igvalue);
1802 rtl88e_dm_init_dynamic_txpower(hw); 1772 rtl88e_dm_init_dynamic_txpower(hw);
1803 rtl88e_dm_init_edca_turbo(hw); 1773 rtl88e_dm_init_edca_turbo(hw);
1804 rtl88e_dm_init_rate_adaptive_mask(hw); 1774 rtl88e_dm_init_rate_adaptive_mask(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h
index 64f1f3ea9807..071ccee69eae 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h
@@ -186,28 +186,12 @@
186#define BW_AUTO_SWITCH_HIGH_LOW 25 186#define BW_AUTO_SWITCH_HIGH_LOW 25
187#define BW_AUTO_SWITCH_LOW_HIGH 30 187#define BW_AUTO_SWITCH_LOW_HIGH 30
188 188
189#define DM_DIG_THRESH_HIGH 40
190#define DM_DIG_THRESH_LOW 35
191
192#define DM_FALSEALARM_THRESH_LOW 400
193#define DM_FALSEALARM_THRESH_HIGH 1000
194
195#define DM_DIG_MAX 0x3e
196#define DM_DIG_MIN 0x1e
197
198#define DM_DIG_MAX_AP 0x32
199#define DM_DIG_MIN_AP 0x20
200
201#define DM_DIG_FA_UPPER 0x3e 189#define DM_DIG_FA_UPPER 0x3e
202#define DM_DIG_FA_LOWER 0x1e 190#define DM_DIG_FA_LOWER 0x1e
203#define DM_DIG_FA_TH0 0x200 191#define DM_DIG_FA_TH0 0x200
204#define DM_DIG_FA_TH1 0x300 192#define DM_DIG_FA_TH1 0x300
205#define DM_DIG_FA_TH2 0x400 193#define DM_DIG_FA_TH2 0x400
206 194
207#define DM_DIG_BACKOFF_MAX 12
208#define DM_DIG_BACKOFF_MIN -4
209#define DM_DIG_BACKOFF_DEFAULT 10
210
211#define RXPATHSELECTION_SS_TH_W 30 195#define RXPATHSELECTION_SS_TH_W 30
212#define RXPATHSELECTION_DIFF_TH 18 196#define RXPATHSELECTION_DIFF_TH 18
213 197
@@ -262,14 +246,6 @@ enum tag_dynamic_init_gain_operation_type_definition {
262 DIG_OP_TYPE_MAX 246 DIG_OP_TYPE_MAX
263}; 247};
264 248
265enum tag_cck_packet_detection_threshold_type_definition {
266 CCK_PD_STAGE_LOWRSSI = 0,
267 CCK_PD_STAGE_HIGHRSSI = 1,
268 CCK_FA_STAGE_LOW = 2,
269 CCK_FA_STAGE_HIGH = 3,
270 CCK_PD_STAGE_MAX = 4,
271};
272
273enum dm_1r_cca_e { 249enum dm_1r_cca_e {
274 CCA_1R = 0, 250 CCA_1R = 0,
275 CCA_2R = 1, 251 CCA_2R = 1,
@@ -288,23 +264,6 @@ enum dm_sw_ant_switch_e {
288 ANS_ANTENNA_MAX = 3, 264 ANS_ANTENNA_MAX = 3,
289}; 265};
290 266
291enum dm_dig_ext_port_alg_e {
292 DIG_EXT_PORT_STAGE_0 = 0,
293 DIG_EXT_PORT_STAGE_1 = 1,
294 DIG_EXT_PORT_STAGE_2 = 2,
295 DIG_EXT_PORT_STAGE_3 = 3,
296 DIG_EXT_PORT_STAGE_MAX = 4,
297};
298
299enum dm_dig_connect_e {
300 DIG_STA_DISCONNECT = 0,
301 DIG_STA_CONNECT = 1,
302 DIG_STA_BEFORE_CONNECT = 2,
303 DIG_MULTISTA_DISCONNECT = 3,
304 DIG_MULTISTA_CONNECT = 4,
305 DIG_CONNECT_MAX
306};
307
308enum pwr_track_control_method { 267enum pwr_track_control_method {
309 BBSWING, 268 BBSWING,
310 TXAGC 269 TXAGC
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index df549c96adef..791efbe6b18c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -47,164 +47,6 @@ static u8 _rtl88ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
47 return skb->priority; 47 return skb->priority;
48} 48}
49 49
50/* mac80211's rate_idx is like this:
51 *
52 * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
53 *
54 * B/G rate:
55 * (rx_status->flag & RX_FLAG_HT) = 0,
56 * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
57 *
58 * N rate:
59 * (rx_status->flag & RX_FLAG_HT) = 1,
60 * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
61 *
62 * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
63 * A rate:
64 * (rx_status->flag & RX_FLAG_HT) = 0,
65 * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
66 *
67 * N rate:
68 * (rx_status->flag & RX_FLAG_HT) = 1,
69 * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
70 */
71static int _rtl88ee_rate_mapping(struct ieee80211_hw *hw,
72 bool isht, u8 desc_rate)
73{
74 int rate_idx;
75
76 if (!isht) {
77 if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
78 switch (desc_rate) {
79 case DESC92C_RATE1M:
80 rate_idx = 0;
81 break;
82 case DESC92C_RATE2M:
83 rate_idx = 1;
84 break;
85 case DESC92C_RATE5_5M:
86 rate_idx = 2;
87 break;
88 case DESC92C_RATE11M:
89 rate_idx = 3;
90 break;
91 case DESC92C_RATE6M:
92 rate_idx = 4;
93 break;
94 case DESC92C_RATE9M:
95 rate_idx = 5;
96 break;
97 case DESC92C_RATE12M:
98 rate_idx = 6;
99 break;
100 case DESC92C_RATE18M:
101 rate_idx = 7;
102 break;
103 case DESC92C_RATE24M:
104 rate_idx = 8;
105 break;
106 case DESC92C_RATE36M:
107 rate_idx = 9;
108 break;
109 case DESC92C_RATE48M:
110 rate_idx = 10;
111 break;
112 case DESC92C_RATE54M:
113 rate_idx = 11;
114 break;
115 default:
116 rate_idx = 0;
117 break;
118 }
119 } else {
120 switch (desc_rate) {
121 case DESC92C_RATE6M:
122 rate_idx = 0;
123 break;
124 case DESC92C_RATE9M:
125 rate_idx = 1;
126 break;
127 case DESC92C_RATE12M:
128 rate_idx = 2;
129 break;
130 case DESC92C_RATE18M:
131 rate_idx = 3;
132 break;
133 case DESC92C_RATE24M:
134 rate_idx = 4;
135 break;
136 case DESC92C_RATE36M:
137 rate_idx = 5;
138 break;
139 case DESC92C_RATE48M:
140 rate_idx = 6;
141 break;
142 case DESC92C_RATE54M:
143 rate_idx = 7;
144 break;
145 default:
146 rate_idx = 0;
147 break;
148 }
149 }
150 } else {
151 switch (desc_rate) {
152 case DESC92C_RATEMCS0:
153 rate_idx = 0;
154 break;
155 case DESC92C_RATEMCS1:
156 rate_idx = 1;
157 break;
158 case DESC92C_RATEMCS2:
159 rate_idx = 2;
160 break;
161 case DESC92C_RATEMCS3:
162 rate_idx = 3;
163 break;
164 case DESC92C_RATEMCS4:
165 rate_idx = 4;
166 break;
167 case DESC92C_RATEMCS5:
168 rate_idx = 5;
169 break;
170 case DESC92C_RATEMCS6:
171 rate_idx = 6;
172 break;
173 case DESC92C_RATEMCS7:
174 rate_idx = 7;
175 break;
176 case DESC92C_RATEMCS8:
177 rate_idx = 8;
178 break;
179 case DESC92C_RATEMCS9:
180 rate_idx = 9;
181 break;
182 case DESC92C_RATEMCS10:
183 rate_idx = 10;
184 break;
185 case DESC92C_RATEMCS11:
186 rate_idx = 11;
187 break;
188 case DESC92C_RATEMCS12:
189 rate_idx = 12;
190 break;
191 case DESC92C_RATEMCS13:
192 rate_idx = 13;
193 break;
194 case DESC92C_RATEMCS14:
195 rate_idx = 14;
196 break;
197 case DESC92C_RATEMCS15:
198 rate_idx = 15;
199 break;
200 default:
201 rate_idx = 0;
202 break;
203 }
204 }
205 return rate_idx;
206}
207
208static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, 50static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw,
209 struct rtl_stats *pstatus, u8 *pdesc, 51 struct rtl_stats *pstatus, u8 *pdesc,
210 struct rx_fwinfo_88e *p_drvinfo, 52 struct rx_fwinfo_88e *p_drvinfo,
@@ -630,8 +472,8 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
630 * are use (RX_FLAG_HT) 472 * are use (RX_FLAG_HT)
631 * Notice: this is diff with windows define 473 * Notice: this is diff with windows define
632 */ 474 */
633 rx_status->rate_idx = _rtl88ee_rate_mapping(hw, 475 rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
634 status->is_ht, status->rate); 476 false, status->rate);
635 477
636 rx_status->mactime = status->timestamp_low; 478 rx_status->mactime = status->timestamp_low;
637 if (phystatus == true) { 479 if (phystatus == true) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index f6cb5aedfdd1..f5ee67cda73a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -32,6 +32,7 @@
32#include "phy_common.h" 32#include "phy_common.h"
33#include "../pci.h" 33#include "../pci.h"
34#include "../base.h" 34#include "../base.h"
35#include "../core.h"
35 36
36#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1) 37#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
37#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1) 38#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
@@ -194,36 +195,6 @@ void dm_savepowerindex(struct ieee80211_hw *hw)
194} 195}
195EXPORT_SYMBOL_GPL(dm_savepowerindex); 196EXPORT_SYMBOL_GPL(dm_savepowerindex);
196 197
197static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
198{
199 struct rtl_priv *rtlpriv = rtl_priv(hw);
200 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
201
202 dm_digtable->dig_enable_flag = true;
203 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
204 dm_digtable->cur_igvalue = 0x20;
205 dm_digtable->pre_igvalue = 0x0;
206 dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
207 dm_digtable->presta_cstate = DIG_STA_DISCONNECT;
208 dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
209 dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
210 dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
211 dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
212 dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
213 dm_digtable->rx_gain_max = DM_DIG_MAX;
214 dm_digtable->rx_gain_min = DM_DIG_MIN;
215 dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
216 dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
217 dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
218 dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
219 dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LowRssi;
220
221 dm_digtable->forbidden_igi = DM_DIG_MIN;
222 dm_digtable->large_fa_hit = 0;
223 dm_digtable->recover_cnt = 0;
224 dm_digtable->dig_dynamic_min = 0x25;
225}
226
227static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) 198static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
228{ 199{
229 struct rtl_priv *rtlpriv = rtl_priv(hw); 200 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -507,27 +478,27 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
507 if (dm_digtable->rssi_val_min > 100) 478 if (dm_digtable->rssi_val_min > 100)
508 dm_digtable->rssi_val_min = 100; 479 dm_digtable->rssi_val_min = 100;
509 480
510 if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) { 481 if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
511 if (dm_digtable->rssi_val_min <= 25) 482 if (dm_digtable->rssi_val_min <= 25)
512 dm_digtable->cur_cck_pd_state = 483 dm_digtable->cur_cck_pd_state =
513 CCK_PD_STAGE_LowRssi; 484 CCK_PD_STAGE_LOWRSSI;
514 else 485 else
515 dm_digtable->cur_cck_pd_state = 486 dm_digtable->cur_cck_pd_state =
516 CCK_PD_STAGE_HighRssi; 487 CCK_PD_STAGE_HIGHRSSI;
517 } else { 488 } else {
518 if (dm_digtable->rssi_val_min <= 20) 489 if (dm_digtable->rssi_val_min <= 20)
519 dm_digtable->cur_cck_pd_state = 490 dm_digtable->cur_cck_pd_state =
520 CCK_PD_STAGE_LowRssi; 491 CCK_PD_STAGE_LOWRSSI;
521 else 492 else
522 dm_digtable->cur_cck_pd_state = 493 dm_digtable->cur_cck_pd_state =
523 CCK_PD_STAGE_HighRssi; 494 CCK_PD_STAGE_HIGHRSSI;
524 } 495 }
525 } else { 496 } else {
526 dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX; 497 dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
527 } 498 }
528 499
529 if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) { 500 if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) {
530 if ((dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) || 501 if ((dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) ||
531 (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_MAX)) 502 (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_MAX))
532 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83); 503 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83);
533 else 504 else
@@ -1374,7 +1345,7 @@ void rtl92c_dm_init(struct ieee80211_hw *hw)
1374 rtlpriv->dm.undec_sm_pwdb = -1; 1345 rtlpriv->dm.undec_sm_pwdb = -1;
1375 rtlpriv->dm.undec_sm_cck = -1; 1346 rtlpriv->dm.undec_sm_cck = -1;
1376 rtlpriv->dm.dm_initialgain_enable = true; 1347 rtlpriv->dm.dm_initialgain_enable = true;
1377 rtl92c_dm_diginit(hw); 1348 rtl_dm_diginit(hw, 0x20);
1378 1349
1379 rtlpriv->dm.dm_flag |= HAL_DM_HIPWR_DISABLE; 1350 rtlpriv->dm.dm_flag |= HAL_DM_HIPWR_DISABLE;
1380 rtl92c_dm_init_dynamic_txpower(hw); 1351 rtl92c_dm_init_dynamic_txpower(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
index 4f232a063636..4422e31fedd9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
@@ -47,25 +47,12 @@
47#define BW_AUTO_SWITCH_HIGH_LOW 25 47#define BW_AUTO_SWITCH_HIGH_LOW 25
48#define BW_AUTO_SWITCH_LOW_HIGH 30 48#define BW_AUTO_SWITCH_LOW_HIGH 30
49 49
50#define DM_DIG_THRESH_HIGH 40
51#define DM_DIG_THRESH_LOW 35
52
53#define DM_FALSEALARM_THRESH_LOW 400
54#define DM_FALSEALARM_THRESH_HIGH 1000
55
56#define DM_DIG_MAX 0x3e
57#define DM_DIG_MIN 0x1e
58
59#define DM_DIG_FA_UPPER 0x32 50#define DM_DIG_FA_UPPER 0x32
60#define DM_DIG_FA_LOWER 0x20 51#define DM_DIG_FA_LOWER 0x20
61#define DM_DIG_FA_TH0 0x20 52#define DM_DIG_FA_TH0 0x20
62#define DM_DIG_FA_TH1 0x100 53#define DM_DIG_FA_TH1 0x100
63#define DM_DIG_FA_TH2 0x200 54#define DM_DIG_FA_TH2 0x200
64 55
65#define DM_DIG_BACKOFF_MAX 12
66#define DM_DIG_BACKOFF_MIN -4
67#define DM_DIG_BACKOFF_DEFAULT 10
68
69#define RXPATHSELECTION_SS_TH_lOW 30 56#define RXPATHSELECTION_SS_TH_lOW 30
70#define RXPATHSELECTION_DIFF_TH 18 57#define RXPATHSELECTION_DIFF_TH 18
71 58
@@ -123,14 +110,6 @@ enum tag_dynamic_init_gain_operation_type_definition {
123 DIG_OP_TYPE_MAX 110 DIG_OP_TYPE_MAX
124}; 111};
125 112
126enum tag_cck_packet_detection_threshold_type_definition {
127 CCK_PD_STAGE_LowRssi = 0,
128 CCK_PD_STAGE_HighRssi = 1,
129 CCK_FA_STAGE_Low = 2,
130 CCK_FA_STAGE_High = 3,
131 CCK_PD_STAGE_MAX = 4,
132};
133
134enum dm_1r_cca_e { 113enum dm_1r_cca_e {
135 CCA_1R = 0, 114 CCA_1R = 0,
136 CCA_2R = 1, 115 CCA_2R = 1,
@@ -149,23 +128,6 @@ enum dm_sw_ant_switch_e {
149 ANS_ANTENNA_MAX = 3, 128 ANS_ANTENNA_MAX = 3,
150}; 129};
151 130
152enum dm_dig_ext_port_alg_e {
153 DIG_EXT_PORT_STAGE_0 = 0,
154 DIG_EXT_PORT_STAGE_1 = 1,
155 DIG_EXT_PORT_STAGE_2 = 2,
156 DIG_EXT_PORT_STAGE_3 = 3,
157 DIG_EXT_PORT_STAGE_MAX = 4,
158};
159
160enum dm_dig_connect_e {
161 DIG_STA_DISCONNECT = 0,
162 DIG_STA_CONNECT = 1,
163 DIG_STA_BEFORE_CONNECT = 2,
164 DIG_MULTISTA_DISCONNECT = 3,
165 DIG_MULTISTA_CONNECT = 4,
166 DIG_CONNECT_MAX
167};
168
169void rtl92c_dm_init(struct ieee80211_hw *hw); 131void rtl92c_dm_init(struct ieee80211_hw *hw);
170void rtl92c_dm_watchdog(struct ieee80211_hw *hw); 132void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
171void rtl92c_dm_write_dig(struct ieee80211_hw *hw); 133void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index b64ae45dc674..e9f4281f5067 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -37,6 +37,7 @@
37#define FW_8192C_POLLING_DELAY 5 37#define FW_8192C_POLLING_DELAY 5
38#define FW_8192C_POLLING_TIMEOUT_COUNT 100 38#define FW_8192C_POLLING_TIMEOUT_COUNT 100
39#define NORMAL_CHIP BIT(4) 39#define NORMAL_CHIP BIT(4)
40#define H2C_92C_KEEP_ALIVE_CTRL 48
40 41
41#define IS_FW_HEADER_EXIST(_pfwhdr) \ 42#define IS_FW_HEADER_EXIST(_pfwhdr) \
42 ((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\ 43 ((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
index 74f9c083b80d..09898cf2e07a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
@@ -30,6 +30,7 @@
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../base.h" 31#include "../base.h"
32#include "../pci.h" 32#include "../pci.h"
33#include "../core.h"
33#include "reg.h" 34#include "reg.h"
34#include "def.h" 35#include "def.h"
35#include "phy.h" 36#include "phy.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
index 9c5311c299fd..38ba707015f5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
@@ -42,25 +42,12 @@
42#define BW_AUTO_SWITCH_HIGH_LOW 25 42#define BW_AUTO_SWITCH_HIGH_LOW 25
43#define BW_AUTO_SWITCH_LOW_HIGH 30 43#define BW_AUTO_SWITCH_LOW_HIGH 30
44 44
45#define DM_DIG_THRESH_HIGH 40
46#define DM_DIG_THRESH_LOW 35
47
48#define DM_FALSEALARM_THRESH_LOW 400
49#define DM_FALSEALARM_THRESH_HIGH 1000
50
51#define DM_DIG_MAX 0x3e
52#define DM_DIG_MIN 0x1e
53
54#define DM_DIG_FA_UPPER 0x32 45#define DM_DIG_FA_UPPER 0x32
55#define DM_DIG_FA_LOWER 0x20 46#define DM_DIG_FA_LOWER 0x20
56#define DM_DIG_FA_TH0 0x20 47#define DM_DIG_FA_TH0 0x20
57#define DM_DIG_FA_TH1 0x100 48#define DM_DIG_FA_TH1 0x100
58#define DM_DIG_FA_TH2 0x200 49#define DM_DIG_FA_TH2 0x200
59 50
60#define DM_DIG_BACKOFF_MAX 12
61#define DM_DIG_BACKOFF_MIN -4
62#define DM_DIG_BACKOFF_DEFAULT 10
63
64#define RXPATHSELECTION_SS_TH_lOW 30 51#define RXPATHSELECTION_SS_TH_lOW 30
65#define RXPATHSELECTION_DIFF_TH 18 52#define RXPATHSELECTION_DIFF_TH 18
66 53
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 5c646d5f7bb8..303b299376c9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -544,8 +544,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
544 (u8 *)(&fw_current_inps)); 544 (u8 *)(&fw_current_inps));
545 } 545 }
546 break; } 546 break; }
547 case HW_VAR_KEEP_ALIVE: 547 case HW_VAR_KEEP_ALIVE: {
548 break; 548 u8 array[2];
549
550 array[0] = 0xff;
551 array[1] = *((u8 *)val);
552 rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2, array);
553 break; }
549 default: 554 default:
550 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 555 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
551 "switch case %d not processed\n", variable); 556 "switch case %d not processed\n", variable);
@@ -1156,47 +1161,35 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
1156 struct rtl_priv *rtlpriv = rtl_priv(hw); 1161 struct rtl_priv *rtlpriv = rtl_priv(hw);
1157 u8 bt_msr = rtl_read_byte(rtlpriv, MSR); 1162 u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
1158 enum led_ctl_mode ledaction = LED_CTL_NO_LINK; 1163 enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
1159 bt_msr &= 0xfc; 1164 u8 mode = MSR_NOLINK;
1160 1165
1161 if (type == NL80211_IFTYPE_UNSPECIFIED || 1166 bt_msr &= 0xfc;
1162 type == NL80211_IFTYPE_STATION) {
1163 _rtl92ce_stop_tx_beacon(hw);
1164 _rtl92ce_enable_bcn_sub_func(hw);
1165 } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP ||
1166 type == NL80211_IFTYPE_MESH_POINT) {
1167 _rtl92ce_resume_tx_beacon(hw);
1168 _rtl92ce_disable_bcn_sub_func(hw);
1169 } else {
1170 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1171 "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n",
1172 type);
1173 }
1174 1167
1175 switch (type) { 1168 switch (type) {
1176 case NL80211_IFTYPE_UNSPECIFIED: 1169 case NL80211_IFTYPE_UNSPECIFIED:
1177 bt_msr |= MSR_NOLINK; 1170 mode = MSR_NOLINK;
1178 ledaction = LED_CTL_LINK;
1179 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 1171 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1180 "Set Network type to NO LINK!\n"); 1172 "Set Network type to NO LINK!\n");
1181 break; 1173 break;
1182 case NL80211_IFTYPE_ADHOC: 1174 case NL80211_IFTYPE_ADHOC:
1183 bt_msr |= MSR_ADHOC; 1175 mode = MSR_ADHOC;
1184 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 1176 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1185 "Set Network type to Ad Hoc!\n"); 1177 "Set Network type to Ad Hoc!\n");
1186 break; 1178 break;
1187 case NL80211_IFTYPE_STATION: 1179 case NL80211_IFTYPE_STATION:
1188 bt_msr |= MSR_INFRA; 1180 mode = MSR_INFRA;
1189 ledaction = LED_CTL_LINK; 1181 ledaction = LED_CTL_LINK;
1190 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 1182 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1191 "Set Network type to STA!\n"); 1183 "Set Network type to STA!\n");
1192 break; 1184 break;
1193 case NL80211_IFTYPE_AP: 1185 case NL80211_IFTYPE_AP:
1194 bt_msr |= MSR_AP; 1186 mode = MSR_AP;
1187 ledaction = LED_CTL_LINK;
1195 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 1188 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1196 "Set Network type to AP!\n"); 1189 "Set Network type to AP!\n");
1197 break; 1190 break;
1198 case NL80211_IFTYPE_MESH_POINT: 1191 case NL80211_IFTYPE_MESH_POINT:
1199 bt_msr |= MSR_ADHOC; 1192 mode = MSR_ADHOC;
1200 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 1193 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1201 "Set Network type to Mesh Point!\n"); 1194 "Set Network type to Mesh Point!\n");
1202 break; 1195 break;
@@ -1207,9 +1200,32 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
1207 1200
1208 } 1201 }
1209 1202
1210 rtl_write_byte(rtlpriv, (MSR), bt_msr); 1203 /* MSR_INFRA == Link in infrastructure network;
1204 * MSR_ADHOC == Link in ad hoc network;
1205 * Therefore, check link state is necessary.
1206 *
1207 * MSR_AP == AP mode; link state does not matter here.
1208 */
1209 if (mode != MSR_AP &&
1210 rtlpriv->mac80211.link_state < MAC80211_LINKED) {
1211 mode = MSR_NOLINK;
1212 ledaction = LED_CTL_NO_LINK;
1213 }
1214 if (mode == MSR_NOLINK || mode == MSR_INFRA) {
1215 _rtl92ce_stop_tx_beacon(hw);
1216 _rtl92ce_enable_bcn_sub_func(hw);
1217 } else if (mode == MSR_ADHOC || mode == MSR_AP) {
1218 _rtl92ce_resume_tx_beacon(hw);
1219 _rtl92ce_disable_bcn_sub_func(hw);
1220 } else {
1221 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1222 "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
1223 mode);
1224 }
1225 rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
1226
1211 rtlpriv->cfg->ops->led_control(hw, ledaction); 1227 rtlpriv->cfg->ops->led_control(hw, ledaction);
1212 if ((bt_msr & MSR_MASK) == MSR_AP) 1228 if (mode == MSR_AP)
1213 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); 1229 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
1214 else 1230 else
1215 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66); 1231 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
@@ -1833,7 +1849,6 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
1833 u32 ratr_value; 1849 u32 ratr_value;
1834 u8 ratr_index = 0; 1850 u8 ratr_index = 0;
1835 u8 nmode = mac->ht_enable; 1851 u8 nmode = mac->ht_enable;
1836 u8 mimo_ps = IEEE80211_SMPS_OFF;
1837 u16 shortgi_rate; 1852 u16 shortgi_rate;
1838 u32 tmp_ratr_value; 1853 u32 tmp_ratr_value;
1839 u8 curtxbw_40mhz = mac->bw_40; 1854 u8 curtxbw_40mhz = mac->bw_40;
@@ -1842,6 +1857,7 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
1842 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1857 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1843 1 : 0; 1858 1 : 0;
1844 enum wireless_mode wirelessmode = mac->mode; 1859 enum wireless_mode wirelessmode = mac->mode;
1860 u32 ratr_mask;
1845 1861
1846 if (rtlhal->current_bandtype == BAND_ON_5G) 1862 if (rtlhal->current_bandtype == BAND_ON_5G)
1847 ratr_value = sta->supp_rates[1] << 4; 1863 ratr_value = sta->supp_rates[1] << 4;
@@ -1865,19 +1881,13 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
1865 case WIRELESS_MODE_N_24G: 1881 case WIRELESS_MODE_N_24G:
1866 case WIRELESS_MODE_N_5G: 1882 case WIRELESS_MODE_N_5G:
1867 nmode = 1; 1883 nmode = 1;
1868 if (mimo_ps == IEEE80211_SMPS_STATIC) { 1884 if (get_rf_type(rtlphy) == RF_1T2R ||
1869 ratr_value &= 0x0007F005; 1885 get_rf_type(rtlphy) == RF_1T1R)
1870 } else { 1886 ratr_mask = 0x000ff005;
1871 u32 ratr_mask; 1887 else
1872 1888 ratr_mask = 0x0f0ff005;
1873 if (get_rf_type(rtlphy) == RF_1T2R ||
1874 get_rf_type(rtlphy) == RF_1T1R)
1875 ratr_mask = 0x000ff005;
1876 else
1877 ratr_mask = 0x0f0ff005;
1878 1889
1879 ratr_value &= ratr_mask; 1890 ratr_value &= ratr_mask;
1880 }
1881 break; 1891 break;
1882 default: 1892 default:
1883 if (rtlphy->rf_type == RF_1T2R) 1893 if (rtlphy->rf_type == RF_1T2R)
@@ -1930,17 +1940,16 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
1930 struct rtl_sta_info *sta_entry = NULL; 1940 struct rtl_sta_info *sta_entry = NULL;
1931 u32 ratr_bitmap; 1941 u32 ratr_bitmap;
1932 u8 ratr_index; 1942 u8 ratr_index;
1933 u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; 1943 u8 curtxbw_40mhz = (sta->ht_cap.cap &
1934 u8 curshortgi_40mhz = curtxbw_40mhz && 1944 IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
1935 (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1945 u8 curshortgi_40mhz = (sta->ht_cap.cap &
1936 1 : 0; 1946 IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
1937 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1947 u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
1938 1 : 0; 1948 1 : 0;
1939 enum wireless_mode wirelessmode = 0; 1949 enum wireless_mode wirelessmode = 0;
1940 bool shortgi = false; 1950 bool shortgi = false;
1941 u8 rate_mask[5]; 1951 u8 rate_mask[5];
1942 u8 macid = 0; 1952 u8 macid = 0;
1943 u8 mimo_ps = IEEE80211_SMPS_OFF;
1944 1953
1945 sta_entry = (struct rtl_sta_info *) sta->drv_priv; 1954 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
1946 wirelessmode = sta_entry->wireless_mode; 1955 wirelessmode = sta_entry->wireless_mode;
@@ -1985,47 +1994,38 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
1985 case WIRELESS_MODE_N_5G: 1994 case WIRELESS_MODE_N_5G:
1986 ratr_index = RATR_INX_WIRELESS_NGB; 1995 ratr_index = RATR_INX_WIRELESS_NGB;
1987 1996
1988 if (mimo_ps == IEEE80211_SMPS_STATIC) { 1997 if (rtlphy->rf_type == RF_1T2R ||
1989 if (rssi_level == 1) 1998 rtlphy->rf_type == RF_1T1R) {
1990 ratr_bitmap &= 0x00070000; 1999 if (curtxbw_40mhz) {
1991 else if (rssi_level == 2) 2000 if (rssi_level == 1)
1992 ratr_bitmap &= 0x0007f000; 2001 ratr_bitmap &= 0x000f0000;
1993 else 2002 else if (rssi_level == 2)
1994 ratr_bitmap &= 0x0007f005; 2003 ratr_bitmap &= 0x000ff000;
2004 else
2005 ratr_bitmap &= 0x000ff015;
2006 } else {
2007 if (rssi_level == 1)
2008 ratr_bitmap &= 0x000f0000;
2009 else if (rssi_level == 2)
2010 ratr_bitmap &= 0x000ff000;
2011 else
2012 ratr_bitmap &= 0x000ff005;
2013 }
1995 } else { 2014 } else {
1996 if (rtlphy->rf_type == RF_1T2R || 2015 if (curtxbw_40mhz) {
1997 rtlphy->rf_type == RF_1T1R) { 2016 if (rssi_level == 1)
1998 if (curtxbw_40mhz) { 2017 ratr_bitmap &= 0x0f0f0000;
1999 if (rssi_level == 1) 2018 else if (rssi_level == 2)
2000 ratr_bitmap &= 0x000f0000; 2019 ratr_bitmap &= 0x0f0ff000;
2001 else if (rssi_level == 2) 2020 else
2002 ratr_bitmap &= 0x000ff000; 2021 ratr_bitmap &= 0x0f0ff015;
2003 else
2004 ratr_bitmap &= 0x000ff015;
2005 } else {
2006 if (rssi_level == 1)
2007 ratr_bitmap &= 0x000f0000;
2008 else if (rssi_level == 2)
2009 ratr_bitmap &= 0x000ff000;
2010 else
2011 ratr_bitmap &= 0x000ff005;
2012 }
2013 } else { 2022 } else {
2014 if (curtxbw_40mhz) { 2023 if (rssi_level == 1)
2015 if (rssi_level == 1) 2024 ratr_bitmap &= 0x0f0f0000;
2016 ratr_bitmap &= 0x0f0f0000; 2025 else if (rssi_level == 2)
2017 else if (rssi_level == 2) 2026 ratr_bitmap &= 0x0f0ff000;
2018 ratr_bitmap &= 0x0f0ff000; 2027 else
2019 else 2028 ratr_bitmap &= 0x0f0ff005;
2020 ratr_bitmap &= 0x0f0ff015;
2021 } else {
2022 if (rssi_level == 1)
2023 ratr_bitmap &= 0x0f0f0000;
2024 else if (rssi_level == 2)
2025 ratr_bitmap &= 0x0f0ff000;
2026 else
2027 ratr_bitmap &= 0x0f0ff005;
2028 }
2029 } 2029 }
2030 } 2030 }
2031 2031
@@ -2058,9 +2058,6 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
2058 "Rate_index:%x, ratr_val:%x, %5phC\n", 2058 "Rate_index:%x, ratr_val:%x, %5phC\n",
2059 ratr_index, ratr_bitmap, rate_mask); 2059 ratr_index, ratr_bitmap, rate_mask);
2060 rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask); 2060 rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
2061
2062 if (macid != 0)
2063 sta_entry->ratr_index = ratr_index;
2064} 2061}
2065 2062
2066void rtl92ce_update_hal_rate_tbl(struct ieee80211_hw *hw, 2063void rtl92ce_update_hal_rate_tbl(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index bc5ca989b915..1ee5a6ae9960 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -518,11 +518,12 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
518 } 518 }
519 case ERFSLEEP:{ 519 case ERFSLEEP:{
520 if (ppsc->rfpwr_state == ERFOFF) 520 if (ppsc->rfpwr_state == ERFOFF)
521 return false; 521 break;
522 for (queue_id = 0, i = 0; 522 for (queue_id = 0, i = 0;
523 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { 523 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
524 ring = &pcipriv->dev.tx_ring[queue_id]; 524 ring = &pcipriv->dev.tx_ring[queue_id];
525 if (skb_queue_len(&ring->queue) == 0) { 525 if (queue_id == BEACON_QUEUE ||
526 skb_queue_len(&ring->queue) == 0) {
526 queue_id++; 527 queue_id++;
527 continue; 528 continue;
528 } else { 529 } else {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index dd5aa089126a..de6cb6c3a48c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -334,21 +334,21 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = {
334 .maps[RTL_IMR_ROK] = IMR_ROK, 334 .maps[RTL_IMR_ROK] = IMR_ROK,
335 .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER), 335 .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
336 336
337 .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M, 337 .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
338 .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M, 338 .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
339 .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M, 339 .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
340 .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M, 340 .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
341 .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M, 341 .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
342 .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M, 342 .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
343 .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M, 343 .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
344 .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M, 344 .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
345 .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M, 345 .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
346 .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M, 346 .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
347 .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M, 347 .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
348 .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M, 348 .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
349 349
350 .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7, 350 .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
351 .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15, 351 .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
352}; 352};
353 353
354static const struct pci_device_id rtl92ce_pci_ids[] = { 354static const struct pci_device_id rtl92ce_pci_ids[] = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index e88dcd0e0af1..84ddd4d07a1d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -257,8 +257,8 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
257 pstats->recvsignalpower = rx_pwr_all; 257 pstats->recvsignalpower = rx_pwr_all;
258 258
259 /* (3)EVM of HT rate */ 259 /* (3)EVM of HT rate */
260 if (pstats->is_ht && pstats->rate >= DESC92_RATEMCS8 && 260 if (pstats->is_ht && pstats->rate >= DESC_RATEMCS8 &&
261 pstats->rate <= DESC92_RATEMCS15) 261 pstats->rate <= DESC_RATEMCS15)
262 max_spatial_stream = 2; 262 max_spatial_stream = 2;
263 else 263 else
264 max_spatial_stream = 1; 264 max_spatial_stream = 1;
@@ -400,9 +400,8 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
400 * are use (RX_FLAG_HT) 400 * are use (RX_FLAG_HT)
401 * Notice: this is diff with windows define 401 * Notice: this is diff with windows define
402 */ 402 */
403 rx_status->rate_idx = rtlwifi_rate_mapping(hw, 403 rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
404 stats->is_ht, stats->rate, 404 false, stats->rate);
405 stats->isfirst_ampdu);
406 405
407 rx_status->mactime = stats->timestamp_low; 406 rx_status->mactime = stats->timestamp_low;
408 if (phystatus) { 407 if (phystatus) {
@@ -501,7 +500,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
501 SET_TX_DESC_RTS_BW(pdesc, 0); 500 SET_TX_DESC_RTS_BW(pdesc, 0);
502 SET_TX_DESC_RTS_SC(pdesc, tcb_desc->rts_sc); 501 SET_TX_DESC_RTS_SC(pdesc, tcb_desc->rts_sc);
503 SET_TX_DESC_RTS_SHORT(pdesc, 502 SET_TX_DESC_RTS_SHORT(pdesc,
504 ((tcb_desc->rts_rate <= DESC92_RATE54M) ? 503 ((tcb_desc->rts_rate <= DESC_RATE54M) ?
505 (tcb_desc->rts_use_shortpreamble ? 1 : 0) 504 (tcb_desc->rts_use_shortpreamble ? 1 : 0)
506 : (tcb_desc->rts_use_shortgi ? 1 : 0))); 505 : (tcb_desc->rts_use_shortgi ? 1 : 0)));
507 506
@@ -624,7 +623,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
624 if (firstseg) 623 if (firstseg)
625 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); 624 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
626 625
627 SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M); 626 SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
628 627
629 SET_TX_DESC_SEQ(pdesc, 0); 628 SET_TX_DESC_SEQ(pdesc, 0);
630 629
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 551321728ae0..fe4b699a12f5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1000,6 +1000,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
1000 local_save_flags(flags); 1000 local_save_flags(flags);
1001 local_irq_enable(); 1001 local_irq_enable();
1002 1002
1003 rtlhal->fw_ready = false;
1003 rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU; 1004 rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
1004 err = _rtl92cu_init_mac(hw); 1005 err = _rtl92cu_init_mac(hw);
1005 if (err) { 1006 if (err) {
@@ -1013,6 +1014,8 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
1013 err = 1; 1014 err = 1;
1014 goto exit; 1015 goto exit;
1015 } 1016 }
1017
1018 rtlhal->fw_ready = true;
1016 rtlhal->last_hmeboxnum = 0; /* h2c */ 1019 rtlhal->last_hmeboxnum = 0; /* h2c */
1017 _rtl92cu_phy_param_tab_init(hw); 1020 _rtl92cu_phy_param_tab_init(hw);
1018 rtl92cu_phy_mac_config(hw); 1021 rtl92cu_phy_mac_config(hw);
@@ -1509,6 +1512,7 @@ void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
1509 /* TODO: Modify later (Find the right parameters) 1512 /* TODO: Modify later (Find the right parameters)
1510 * NOTE: Fix test chip's bug (about contention windows's randomness) */ 1513 * NOTE: Fix test chip's bug (about contention windows's randomness) */
1511 if ((mac->opmode == NL80211_IFTYPE_ADHOC) || 1514 if ((mac->opmode == NL80211_IFTYPE_ADHOC) ||
1515 (mac->opmode == NL80211_IFTYPE_MESH_POINT) ||
1512 (mac->opmode == NL80211_IFTYPE_AP)) { 1516 (mac->opmode == NL80211_IFTYPE_AP)) {
1513 rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50); 1517 rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50);
1514 rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50); 1518 rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index c2d8ec6afcda..133e395b7401 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -880,8 +880,8 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
880 pstats->rxpower = rx_pwr_all; 880 pstats->rxpower = rx_pwr_all;
881 pstats->recvsignalpower = rx_pwr_all; 881 pstats->recvsignalpower = rx_pwr_all;
882 if (GET_RX_DESC_RX_MCS(pdesc) && 882 if (GET_RX_DESC_RX_MCS(pdesc) &&
883 GET_RX_DESC_RX_MCS(pdesc) >= DESC92_RATEMCS8 && 883 GET_RX_DESC_RX_MCS(pdesc) >= DESC_RATEMCS8 &&
884 GET_RX_DESC_RX_MCS(pdesc) <= DESC92_RATEMCS15) 884 GET_RX_DESC_RX_MCS(pdesc) <= DESC_RATEMCS15)
885 max_spatial_stream = 2; 885 max_spatial_stream = 2;
886 else 886 else
887 max_spatial_stream = 1; 887 max_spatial_stream = 1;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index e06bafee37f9..90a714c189a8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -257,20 +257,20 @@ static struct rtl_hal_cfg rtl92cu_hal_cfg = {
257 .maps[RTL_IMR_ROK] = IMR_ROK, 257 .maps[RTL_IMR_ROK] = IMR_ROK,
258 .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER), 258 .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
259 259
260 .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M, 260 .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
261 .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M, 261 .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
262 .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M, 262 .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
263 .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M, 263 .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
264 .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M, 264 .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
265 .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M, 265 .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
266 .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M, 266 .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
267 .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M, 267 .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
268 .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M, 268 .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
269 .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M, 269 .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
270 .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M, 270 .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
271 .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M, 271 .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
272 .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7, 272 .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
273 .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15, 273 .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
274}; 274};
275 275
276#define USB_VENDER_ID_REALTEK 0x0bda 276#define USB_VENDER_ID_REALTEK 0x0bda
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index f383d5f1fed5..cbead007171f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -325,6 +325,7 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
325 && (GET_RX_DESC_FAGGR(pdesc) == 1)); 325 && (GET_RX_DESC_FAGGR(pdesc) == 1));
326 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); 326 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
327 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); 327 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
328 stats->is_ht = (bool)GET_RX_DESC_RX_HT(pdesc);
328 rx_status->freq = hw->conf.chandef.chan->center_freq; 329 rx_status->freq = hw->conf.chandef.chan->center_freq;
329 rx_status->band = hw->conf.chandef.chan->band; 330 rx_status->band = hw->conf.chandef.chan->band;
330 if (GET_RX_DESC_CRC32(pdesc)) 331 if (GET_RX_DESC_CRC32(pdesc))
@@ -338,10 +339,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
338 rx_status->flag |= RX_FLAG_MACTIME_START; 339 rx_status->flag |= RX_FLAG_MACTIME_START;
339 if (stats->decrypted) 340 if (stats->decrypted)
340 rx_status->flag |= RX_FLAG_DECRYPTED; 341 rx_status->flag |= RX_FLAG_DECRYPTED;
341 rx_status->rate_idx = rtlwifi_rate_mapping(hw, 342 rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
342 (bool)GET_RX_DESC_RX_HT(pdesc), 343 false, stats->rate);
343 (u8)GET_RX_DESC_RX_MCS(pdesc),
344 (bool)GET_RX_DESC_PAGGR(pdesc));
345 rx_status->mactime = GET_RX_DESC_TSFL(pdesc); 344 rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
346 if (phystatus) { 345 if (phystatus) {
347 p_drvinfo = (struct rx_fwinfo_92c *)(skb->data + 346 p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
@@ -393,6 +392,7 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
393 && (GET_RX_DESC_FAGGR(rxdesc) == 1)); 392 && (GET_RX_DESC_FAGGR(rxdesc) == 1));
394 stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc); 393 stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc);
395 stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc); 394 stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc);
395 stats.is_ht = (bool)GET_RX_DESC_RX_HT(rxdesc);
396 /* TODO: is center_freq changed when doing scan? */ 396 /* TODO: is center_freq changed when doing scan? */
397 /* TODO: Shall we add protection or just skip those two step? */ 397 /* TODO: Shall we add protection or just skip those two step? */
398 rx_status->freq = hw->conf.chandef.chan->center_freq; 398 rx_status->freq = hw->conf.chandef.chan->center_freq;
@@ -406,10 +406,8 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
406 if (GET_RX_DESC_RX_HT(rxdesc)) 406 if (GET_RX_DESC_RX_HT(rxdesc))
407 rx_status->flag |= RX_FLAG_HT; 407 rx_status->flag |= RX_FLAG_HT;
408 /* Data rate */ 408 /* Data rate */
409 rx_status->rate_idx = rtlwifi_rate_mapping(hw, 409 rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats.is_ht,
410 (bool)GET_RX_DESC_RX_HT(rxdesc), 410 false, stats.rate);
411 (u8)GET_RX_DESC_RX_MCS(rxdesc),
412 (bool)GET_RX_DESC_PAGGR(rxdesc));
413 /* There is a phy status after this rx descriptor. */ 411 /* There is a phy status after this rx descriptor. */
414 if (GET_RX_DESC_PHY_STATUS(rxdesc)) { 412 if (GET_RX_DESC_PHY_STATUS(rxdesc)) {
415 p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE); 413 p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE);
@@ -545,7 +543,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
545 SET_TX_DESC_RTS_BW(txdesc, 0); 543 SET_TX_DESC_RTS_BW(txdesc, 0);
546 SET_TX_DESC_RTS_SC(txdesc, tcb_desc->rts_sc); 544 SET_TX_DESC_RTS_SC(txdesc, tcb_desc->rts_sc);
547 SET_TX_DESC_RTS_SHORT(txdesc, 545 SET_TX_DESC_RTS_SHORT(txdesc,
548 ((tcb_desc->rts_rate <= DESC92_RATE54M) ? 546 ((tcb_desc->rts_rate <= DESC_RATE54M) ?
549 (tcb_desc->rts_use_shortpreamble ? 1 : 0) 547 (tcb_desc->rts_use_shortpreamble ? 1 : 0)
550 : (tcb_desc->rts_use_shortgi ? 1 : 0))); 548 : (tcb_desc->rts_use_shortgi ? 1 : 0)));
551 if (mac->bw_40) { 549 if (mac->bw_40) {
@@ -644,7 +642,7 @@ void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
644 } 642 }
645 SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */ 643 SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */
646 SET_TX_DESC_OWN(pDesc, 1); 644 SET_TX_DESC_OWN(pDesc, 1);
647 SET_TX_DESC_TX_RATE(pDesc, DESC92_RATE1M); 645 SET_TX_DESC_TX_RATE(pDesc, DESC_RATE1M);
648 _rtl_tx_desc_checksum(pDesc); 646 _rtl_tx_desc_checksum(pDesc);
649} 647}
650 648
@@ -660,7 +658,7 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
660 memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE); 658 memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE);
661 if (firstseg) 659 if (firstseg)
662 SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE); 660 SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE);
663 SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M); 661 SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
664 SET_TX_DESC_SEQ(pdesc, 0); 662 SET_TX_DESC_SEQ(pdesc, 0);
665 SET_TX_DESC_LINIP(pdesc, 0); 663 SET_TX_DESC_LINIP(pdesc, 0);
666 SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); 664 SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 304c443b89b2..a1be5a68edfb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -29,6 +29,7 @@
29 29
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../base.h" 31#include "../base.h"
32#include "../core.h"
32#include "reg.h" 33#include "reg.h"
33#include "def.h" 34#include "def.h"
34#include "phy.h" 35#include "phy.h"
@@ -155,34 +156,6 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
155 {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */ 156 {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
156}; 157};
157 158
158static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
159{
160 struct rtl_priv *rtlpriv = rtl_priv(hw);
161 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
162
163 de_digtable->dig_enable_flag = true;
164 de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
165 de_digtable->cur_igvalue = 0x20;
166 de_digtable->pre_igvalue = 0x0;
167 de_digtable->cursta_cstate = DIG_STA_DISCONNECT;
168 de_digtable->presta_cstate = DIG_STA_DISCONNECT;
169 de_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
170 de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
171 de_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
172 de_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
173 de_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
174 de_digtable->rx_gain_max = DM_DIG_FA_UPPER;
175 de_digtable->rx_gain_min = DM_DIG_FA_LOWER;
176 de_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
177 de_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
178 de_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
179 de_digtable->pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
180 de_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
181 de_digtable->large_fa_hit = 0;
182 de_digtable->recover_cnt = 0;
183 de_digtable->forbidden_igi = DM_DIG_FA_LOWER;
184}
185
186static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) 159static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
187{ 160{
188 u32 ret_value; 161 u32 ret_value;
@@ -1305,7 +1278,9 @@ void rtl92d_dm_init(struct ieee80211_hw *hw)
1305 struct rtl_priv *rtlpriv = rtl_priv(hw); 1278 struct rtl_priv *rtlpriv = rtl_priv(hw);
1306 1279
1307 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; 1280 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
1308 rtl92d_dm_diginit(hw); 1281 rtl_dm_diginit(hw, 0x20);
1282 rtlpriv->dm_digtable.rx_gain_max = DM_DIG_FA_UPPER;
1283 rtlpriv->dm_digtable.rx_gain_min = DM_DIG_FA_LOWER;
1309 rtl92d_dm_init_dynamic_txpower(hw); 1284 rtl92d_dm_init_dynamic_txpower(hw);
1310 rtl92d_dm_init_edca_turbo(hw); 1285 rtl92d_dm_init_edca_turbo(hw);
1311 rtl92d_dm_init_rate_adaptive_mask(hw); 1286 rtl92d_dm_init_rate_adaptive_mask(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
index 3fea0c11c24a..f2d318ceeb28 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
@@ -42,25 +42,12 @@
42#define BW_AUTO_SWITCH_HIGH_LOW 25 42#define BW_AUTO_SWITCH_HIGH_LOW 25
43#define BW_AUTO_SWITCH_LOW_HIGH 30 43#define BW_AUTO_SWITCH_LOW_HIGH 30
44 44
45#define DM_DIG_THRESH_HIGH 40
46#define DM_DIG_THRESH_LOW 35
47
48#define DM_FALSEALARM_THRESH_LOW 400
49#define DM_FALSEALARM_THRESH_HIGH 1000
50
51#define DM_DIG_MAX 0x3e
52#define DM_DIG_MIN 0x1c
53
54#define DM_DIG_FA_UPPER 0x32 45#define DM_DIG_FA_UPPER 0x32
55#define DM_DIG_FA_LOWER 0x20 46#define DM_DIG_FA_LOWER 0x20
56#define DM_DIG_FA_TH0 0x100 47#define DM_DIG_FA_TH0 0x100
57#define DM_DIG_FA_TH1 0x400 48#define DM_DIG_FA_TH1 0x400
58#define DM_DIG_FA_TH2 0x600 49#define DM_DIG_FA_TH2 0x600
59 50
60#define DM_DIG_BACKOFF_MAX 12
61#define DM_DIG_BACKOFF_MIN -4
62#define DM_DIG_BACKOFF_DEFAULT 10
63
64#define RXPATHSELECTION_SS_TH_lOW 30 51#define RXPATHSELECTION_SS_TH_lOW 30
65#define RXPATHSELECTION_DIFF_TH 18 52#define RXPATHSELECTION_DIFF_TH 18
66 53
@@ -108,14 +95,6 @@ enum tag_dynamic_init_gain_operation_type_definition {
108 DIG_OP_TYPE_MAX 95 DIG_OP_TYPE_MAX
109}; 96};
110 97
111enum tag_cck_packet_detection_threshold_type_definition {
112 CCK_PD_STAGE_LOWRSSI = 0,
113 CCK_PD_STAGE_HIGHRSSI = 1,
114 CCK_FA_STAGE_LOW = 2,
115 CCK_FA_STAGE_HIGH = 3,
116 CCK_PD_STAGE_MAX = 4,
117};
118
119enum dm_1r_cca { 98enum dm_1r_cca {
120 CCA_1R = 0, 99 CCA_1R = 0,
121 CCA_2R = 1, 100 CCA_2R = 1,
@@ -134,23 +113,6 @@ enum dm_sw_ant_switch {
134 ANS_ANTENNA_MAX = 3, 113 ANS_ANTENNA_MAX = 3,
135}; 114};
136 115
137enum dm_dig_ext_port_alg {
138 DIG_EXT_PORT_STAGE_0 = 0,
139 DIG_EXT_PORT_STAGE_1 = 1,
140 DIG_EXT_PORT_STAGE_2 = 2,
141 DIG_EXT_PORT_STAGE_3 = 3,
142 DIG_EXT_PORT_STAGE_MAX = 4,
143};
144
145enum dm_dig_connect {
146 DIG_STA_DISCONNECT = 0,
147 DIG_STA_CONNECT = 1,
148 DIG_STA_BEFORE_CONNECT = 2,
149 DIG_MULTISTA_DISCONNECT = 3,
150 DIG_MULTISTA_CONNECT = 4,
151 DIG_CONNECT_MAX
152};
153
154void rtl92d_dm_init(struct ieee80211_hw *hw); 116void rtl92d_dm_init(struct ieee80211_hw *hw);
155void rtl92d_dm_watchdog(struct ieee80211_hw *hw); 117void rtl92d_dm_watchdog(struct ieee80211_hw *hw);
156void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw); 118void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
index 23177076b97f..62ef8209718f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
@@ -540,23 +540,6 @@ void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw,
540 return; 540 return;
541} 541}
542 542
543void rtl92d_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
544{
545 struct rtl_priv *rtlpriv = rtl_priv(hw);
546 u8 u1_h2c_set_pwrmode[3] = { 0 };
547 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
548
549 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
550 SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
551 SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1);
552 SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
553 ppsc->reg_max_lps_awakeintvl);
554 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
555 "rtl92d_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode",
556 u1_h2c_set_pwrmode, 3);
557 rtl92d_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
558}
559
560static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw, 543static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw,
561 struct sk_buff *skb) 544 struct sk_buff *skb)
562{ 545{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
index a55a803a0b4d..1646e7c3d0f8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
@@ -136,7 +136,6 @@ int rtl92d_download_fw(struct ieee80211_hw *hw);
136void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, 136void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
137 u32 cmd_len, u8 *p_cmdbuffer); 137 u32 cmd_len, u8 *p_cmdbuffer);
138void rtl92d_firmware_selfreset(struct ieee80211_hw *hw); 138void rtl92d_firmware_selfreset(struct ieee80211_hw *hw);
139void rtl92d_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
140void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); 139void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
141void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); 140void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
142 141
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 280c3da42993..01bcc2d218dc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -546,7 +546,7 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
546 txpktbuf_bndy = 246; 546 txpktbuf_bndy = 246;
547 value8 = 0; 547 value8 = 0;
548 value32 = 0x80bf0d29; 548 value32 = 0x80bf0d29;
549 } else if (rtlpriv->rtlhal.macphymode != SINGLEMAC_SINGLEPHY) { 549 } else {
550 maxPage = 127; 550 maxPage = 127;
551 txpktbuf_bndy = 123; 551 txpktbuf_bndy = 123;
552 value8 = 0; 552 value8 = 0;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index a0aba088259a..b19d0398215f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -337,21 +337,21 @@ static struct rtl_hal_cfg rtl92de_hal_cfg = {
337 .maps[RTL_IMR_ROK] = IMR_ROK, 337 .maps[RTL_IMR_ROK] = IMR_ROK,
338 .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER), 338 .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
339 339
340 .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M, 340 .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
341 .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M, 341 .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
342 .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M, 342 .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
343 .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M, 343 .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
344 .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M, 344 .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
345 .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M, 345 .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
346 .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M, 346 .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
347 .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M, 347 .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
348 .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M, 348 .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
349 .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M, 349 .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
350 .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M, 350 .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
351 .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M, 351 .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
352 352
353 .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7, 353 .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
354 .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15, 354 .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
355}; 355};
356 356
357static struct pci_device_id rtl92de_pci_ids[] = { 357static struct pci_device_id rtl92de_pci_ids[] = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 8efbcc7af250..1feaa629dd4f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -235,8 +235,8 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
235 pstats->rx_pwdb_all = pwdb_all; 235 pstats->rx_pwdb_all = pwdb_all;
236 pstats->rxpower = rx_pwr_all; 236 pstats->rxpower = rx_pwr_all;
237 pstats->recvsignalpower = rx_pwr_all; 237 pstats->recvsignalpower = rx_pwr_all;
238 if (pdesc->rxht && pdesc->rxmcs >= DESC92_RATEMCS8 && 238 if (pdesc->rxht && pdesc->rxmcs >= DESC_RATEMCS8 &&
239 pdesc->rxmcs <= DESC92_RATEMCS15) 239 pdesc->rxmcs <= DESC_RATEMCS15)
240 max_spatial_stream = 2; 240 max_spatial_stream = 2;
241 else 241 else
242 max_spatial_stream = 1; 242 max_spatial_stream = 1;
@@ -499,6 +499,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
499 && (GET_RX_DESC_FAGGR(pdesc) == 1)); 499 && (GET_RX_DESC_FAGGR(pdesc) == 1));
500 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); 500 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
501 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); 501 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
502 stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
502 rx_status->freq = hw->conf.chandef.chan->center_freq; 503 rx_status->freq = hw->conf.chandef.chan->center_freq;
503 rx_status->band = hw->conf.chandef.chan->band; 504 rx_status->band = hw->conf.chandef.chan->band;
504 if (GET_RX_DESC_CRC32(pdesc)) 505 if (GET_RX_DESC_CRC32(pdesc))
@@ -512,10 +513,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
512 rx_status->flag |= RX_FLAG_MACTIME_START; 513 rx_status->flag |= RX_FLAG_MACTIME_START;
513 if (stats->decrypted) 514 if (stats->decrypted)
514 rx_status->flag |= RX_FLAG_DECRYPTED; 515 rx_status->flag |= RX_FLAG_DECRYPTED;
515 rx_status->rate_idx = rtlwifi_rate_mapping(hw, 516 rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
516 (bool)GET_RX_DESC_RXHT(pdesc), 517 false, stats->rate);
517 (u8)GET_RX_DESC_RXMCS(pdesc),
518 (bool)GET_RX_DESC_PAGGR(pdesc));
519 rx_status->mactime = GET_RX_DESC_TSFL(pdesc); 518 rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
520 if (phystatus) { 519 if (phystatus) {
521 p_drvinfo = (struct rx_fwinfo_92d *)(skb->data + 520 p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
@@ -612,14 +611,14 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
612 } 611 }
613 /* 5G have no CCK rate */ 612 /* 5G have no CCK rate */
614 if (rtlhal->current_bandtype == BAND_ON_5G) 613 if (rtlhal->current_bandtype == BAND_ON_5G)
615 if (ptcb_desc->hw_rate < DESC92_RATE6M) 614 if (ptcb_desc->hw_rate < DESC_RATE6M)
616 ptcb_desc->hw_rate = DESC92_RATE6M; 615 ptcb_desc->hw_rate = DESC_RATE6M;
617 SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); 616 SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
618 if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble) 617 if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble)
619 SET_TX_DESC_DATA_SHORTGI(pdesc, 1); 618 SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
620 619
621 if (rtlhal->macphymode == DUALMAC_DUALPHY && 620 if (rtlhal->macphymode == DUALMAC_DUALPHY &&
622 ptcb_desc->hw_rate == DESC92_RATEMCS7) 621 ptcb_desc->hw_rate == DESC_RATEMCS7)
623 SET_TX_DESC_DATA_SHORTGI(pdesc, 1); 622 SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
624 623
625 if (info->flags & IEEE80211_TX_CTL_AMPDU) { 624 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
@@ -635,13 +634,13 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
635 SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0)); 634 SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
636 /* 5G have no CCK rate */ 635 /* 5G have no CCK rate */
637 if (rtlhal->current_bandtype == BAND_ON_5G) 636 if (rtlhal->current_bandtype == BAND_ON_5G)
638 if (ptcb_desc->rts_rate < DESC92_RATE6M) 637 if (ptcb_desc->rts_rate < DESC_RATE6M)
639 ptcb_desc->rts_rate = DESC92_RATE6M; 638 ptcb_desc->rts_rate = DESC_RATE6M;
640 SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate); 639 SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
641 SET_TX_DESC_RTS_BW(pdesc, 0); 640 SET_TX_DESC_RTS_BW(pdesc, 0);
642 SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc); 641 SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
643 SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <= 642 SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
644 DESC92_RATE54M) ? 643 DESC_RATE54M) ?
645 (ptcb_desc->rts_use_shortpreamble ? 1 : 0) : 644 (ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
646 (ptcb_desc->rts_use_shortgi ? 1 : 0))); 645 (ptcb_desc->rts_use_shortgi ? 1 : 0)));
647 if (bw_40) { 646 if (bw_40) {
@@ -756,9 +755,9 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
756 * The braces are needed no matter what checkpatch says 755 * The braces are needed no matter what checkpatch says
757 */ 756 */
758 if (rtlhal->current_bandtype == BAND_ON_5G) { 757 if (rtlhal->current_bandtype == BAND_ON_5G) {
759 SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE6M); 758 SET_TX_DESC_TX_RATE(pdesc, DESC_RATE6M);
760 } else { 759 } else {
761 SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M); 760 SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
762 } 761 }
763 SET_TX_DESC_SEQ(pdesc, 0); 762 SET_TX_DESC_SEQ(pdesc, 0);
764 SET_TX_DESC_LINIP(pdesc, 0); 763 SET_TX_DESC_LINIP(pdesc, 0);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c
index 77deedf79d1d..459f3d0efa2f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c
@@ -26,6 +26,7 @@
26#include "../wifi.h" 26#include "../wifi.h"
27#include "../base.h" 27#include "../base.h"
28#include "../pci.h" 28#include "../pci.h"
29#include "../core.h"
29#include "reg.h" 30#include "reg.h"
30#include "def.h" 31#include "def.h"
31#include "phy.h" 32#include "phy.h"
@@ -151,35 +152,6 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
151 {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */ 152 {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
152}; 153};
153 154
154static void rtl92ee_dm_diginit(struct ieee80211_hw *hw)
155{
156 struct rtl_priv *rtlpriv = rtl_priv(hw);
157 struct dig_t *dm_dig = &rtlpriv->dm_digtable;
158
159 dm_dig->cur_igvalue = rtl_get_bbreg(hw, DM_REG_IGI_A_11N,
160 DM_BIT_IGI_11N);
161 dm_dig->rssi_lowthresh = DM_DIG_THRESH_LOW;
162 dm_dig->rssi_highthresh = DM_DIG_THRESH_HIGH;
163 dm_dig->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
164 dm_dig->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
165 dm_dig->rx_gain_max = DM_DIG_MAX;
166 dm_dig->rx_gain_min = DM_DIG_MIN;
167 dm_dig->back_val = DM_DIG_BACKOFF_DEFAULT;
168 dm_dig->back_range_max = DM_DIG_BACKOFF_MAX;
169 dm_dig->back_range_min = DM_DIG_BACKOFF_MIN;
170 dm_dig->pre_cck_cca_thres = 0xff;
171 dm_dig->cur_cck_cca_thres = 0x83;
172 dm_dig->forbidden_igi = DM_DIG_MIN;
173 dm_dig->large_fa_hit = 0;
174 dm_dig->recover_cnt = 0;
175 dm_dig->dig_dynamic_min = DM_DIG_MIN;
176 dm_dig->dig_dynamic_min_1 = DM_DIG_MIN;
177 dm_dig->media_connect_0 = false;
178 dm_dig->media_connect_1 = false;
179 rtlpriv->dm.dm_initialgain_enable = true;
180 dm_dig->bt30_cur_igi = 0x32;
181}
182
183static void rtl92ee_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) 155static void rtl92ee_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
184{ 156{
185 u32 ret_value; 157 u32 ret_value;
@@ -298,7 +270,7 @@ static void rtl92ee_dm_dig(struct ieee80211_hw *hw)
298 struct rtl_priv *rtlpriv = rtl_priv(hw); 270 struct rtl_priv *rtlpriv = rtl_priv(hw);
299 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 271 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
300 struct dig_t *dm_dig = &rtlpriv->dm_digtable; 272 struct dig_t *dm_dig = &rtlpriv->dm_digtable;
301 u8 dig_dynamic_min , dig_maxofmin; 273 u8 dig_min_0, dig_maxofmin;
302 bool bfirstconnect , bfirstdisconnect; 274 bool bfirstconnect , bfirstdisconnect;
303 u8 dm_dig_max, dm_dig_min; 275 u8 dm_dig_max, dm_dig_min;
304 u8 current_igi = dm_dig->cur_igvalue; 276 u8 current_igi = dm_dig->cur_igvalue;
@@ -308,7 +280,7 @@ static void rtl92ee_dm_dig(struct ieee80211_hw *hw)
308 if (mac->act_scanning) 280 if (mac->act_scanning)
309 return; 281 return;
310 282
311 dig_dynamic_min = dm_dig->dig_dynamic_min; 283 dig_min_0 = dm_dig->dig_min_0;
312 bfirstconnect = (mac->link_state >= MAC80211_LINKED) && 284 bfirstconnect = (mac->link_state >= MAC80211_LINKED) &&
313 !dm_dig->media_connect_0; 285 !dm_dig->media_connect_0;
314 bfirstdisconnect = (mac->link_state < MAC80211_LINKED) && 286 bfirstdisconnect = (mac->link_state < MAC80211_LINKED) &&
@@ -329,19 +301,19 @@ static void rtl92ee_dm_dig(struct ieee80211_hw *hw)
329 if (rtlpriv->dm.one_entry_only) { 301 if (rtlpriv->dm.one_entry_only) {
330 offset = 0; 302 offset = 0;
331 if (dm_dig->rssi_val_min - offset < dm_dig_min) 303 if (dm_dig->rssi_val_min - offset < dm_dig_min)
332 dig_dynamic_min = dm_dig_min; 304 dig_min_0 = dm_dig_min;
333 else if (dm_dig->rssi_val_min - offset > 305 else if (dm_dig->rssi_val_min - offset >
334 dig_maxofmin) 306 dig_maxofmin)
335 dig_dynamic_min = dig_maxofmin; 307 dig_min_0 = dig_maxofmin;
336 else 308 else
337 dig_dynamic_min = dm_dig->rssi_val_min - offset; 309 dig_min_0 = dm_dig->rssi_val_min - offset;
338 } else { 310 } else {
339 dig_dynamic_min = dm_dig_min; 311 dig_min_0 = dm_dig_min;
340 } 312 }
341 313
342 } else { 314 } else {
343 dm_dig->rx_gain_max = dm_dig_max; 315 dm_dig->rx_gain_max = dm_dig_max;
344 dig_dynamic_min = dm_dig_min; 316 dig_min_0 = dm_dig_min;
345 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n"); 317 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
346 } 318 }
347 319
@@ -368,10 +340,10 @@ static void rtl92ee_dm_dig(struct ieee80211_hw *hw)
368 } else { 340 } else {
369 if (dm_dig->large_fa_hit < 3) { 341 if (dm_dig->large_fa_hit < 3) {
370 if ((dm_dig->forbidden_igi - 1) < 342 if ((dm_dig->forbidden_igi - 1) <
371 dig_dynamic_min) { 343 dig_min_0) {
372 dm_dig->forbidden_igi = dig_dynamic_min; 344 dm_dig->forbidden_igi = dig_min_0;
373 dm_dig->rx_gain_min = 345 dm_dig->rx_gain_min =
374 dig_dynamic_min; 346 dig_min_0;
375 } else { 347 } else {
376 dm_dig->forbidden_igi--; 348 dm_dig->forbidden_igi--;
377 dm_dig->rx_gain_min = 349 dm_dig->rx_gain_min =
@@ -430,7 +402,7 @@ static void rtl92ee_dm_dig(struct ieee80211_hw *hw)
430 rtl92ee_dm_write_dig(hw , current_igi); 402 rtl92ee_dm_write_dig(hw , current_igi);
431 dm_dig->media_connect_0 = ((mac->link_state >= MAC80211_LINKED) ? 403 dm_dig->media_connect_0 = ((mac->link_state >= MAC80211_LINKED) ?
432 true : false); 404 true : false);
433 dm_dig->dig_dynamic_min = dig_dynamic_min; 405 dm_dig->dig_min_0 = dig_min_0;
434} 406}
435 407
436void rtl92ee_dm_write_cck_cca_thres(struct ieee80211_hw *hw, u8 cur_thres) 408void rtl92ee_dm_write_cck_cca_thres(struct ieee80211_hw *hw, u8 cur_thres)
@@ -1088,10 +1060,11 @@ static void rtl92ee_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw)
1088void rtl92ee_dm_init(struct ieee80211_hw *hw) 1060void rtl92ee_dm_init(struct ieee80211_hw *hw)
1089{ 1061{
1090 struct rtl_priv *rtlpriv = rtl_priv(hw); 1062 struct rtl_priv *rtlpriv = rtl_priv(hw);
1063 u32 cur_igvalue = rtl_get_bbreg(hw, DM_REG_IGI_A_11N, DM_BIT_IGI_11N);
1091 1064
1092 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; 1065 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
1093 1066
1094 rtl92ee_dm_diginit(hw); 1067 rtl_dm_diginit(hw, cur_igvalue);
1095 rtl92ee_dm_init_rate_adaptive_mask(hw); 1068 rtl92ee_dm_init_rate_adaptive_mask(hw);
1096 rtl92ee_dm_init_primary_cca_check(hw); 1069 rtl92ee_dm_init_primary_cca_check(hw);
1097 rtl92ee_dm_init_edca_turbo(hw); 1070 rtl92ee_dm_init_edca_turbo(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h
index 881db7d6fef7..107d5a488fa8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h
@@ -189,28 +189,12 @@
189#define BW_AUTO_SWITCH_HIGH_LOW 25 189#define BW_AUTO_SWITCH_HIGH_LOW 25
190#define BW_AUTO_SWITCH_LOW_HIGH 30 190#define BW_AUTO_SWITCH_LOW_HIGH 30
191 191
192#define DM_DIG_THRESH_HIGH 40
193#define DM_DIG_THRESH_LOW 35
194
195#define DM_FALSEALARM_THRESH_LOW 400
196#define DM_FALSEALARM_THRESH_HIGH 1000
197
198#define DM_DIG_MAX 0x3e
199#define DM_DIG_MIN 0x1e
200
201#define DM_DIG_MAX_AP 0x32
202#define DM_DIG_MIN_AP 0x20
203
204#define DM_DIG_FA_UPPER 0x3e 192#define DM_DIG_FA_UPPER 0x3e
205#define DM_DIG_FA_LOWER 0x1e 193#define DM_DIG_FA_LOWER 0x1e
206#define DM_DIG_FA_TH0 0x200 194#define DM_DIG_FA_TH0 0x200
207#define DM_DIG_FA_TH1 0x300 195#define DM_DIG_FA_TH1 0x300
208#define DM_DIG_FA_TH2 0x400 196#define DM_DIG_FA_TH2 0x400
209 197
210#define DM_DIG_BACKOFF_MAX 12
211#define DM_DIG_BACKOFF_MIN -4
212#define DM_DIG_BACKOFF_DEFAULT 10
213
214#define RXPATHSELECTION_SS_TH_LOW 30 198#define RXPATHSELECTION_SS_TH_LOW 30
215#define RXPATHSELECTION_DIFF_TH 18 199#define RXPATHSELECTION_DIFF_TH 18
216 200
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
index 45c128b91f7f..c5d4b8013cde 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
@@ -666,7 +666,6 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
666 struct sk_buff *skb = NULL; 666 struct sk_buff *skb = NULL;
667 667
668 u32 totalpacketlen; 668 u32 totalpacketlen;
669 bool rtstatus;
670 u8 u1rsvdpageloc[5] = { 0 }; 669 u8 u1rsvdpageloc[5] = { 0 };
671 bool b_dlok = false; 670 bool b_dlok = false;
672 671
@@ -728,10 +727,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
728 memcpy((u8 *)skb_put(skb, totalpacketlen), 727 memcpy((u8 *)skb_put(skb, totalpacketlen),
729 &reserved_page_packet, totalpacketlen); 728 &reserved_page_packet, totalpacketlen);
730 729
731 rtstatus = rtl_cmd_send_packet(hw, skb); 730 b_dlok = true;
732
733 if (rtstatus)
734 b_dlok = true;
735 731
736 if (b_dlok) { 732 if (b_dlok) {
737 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD , 733 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
index 1a87edca2c3f..b461b3128da5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
@@ -85,29 +85,6 @@ static void _rtl92ee_enable_bcn_sub_func(struct ieee80211_hw *hw)
85 _rtl92ee_set_bcn_ctrl_reg(hw, 0, BIT(1)); 85 _rtl92ee_set_bcn_ctrl_reg(hw, 0, BIT(1));
86} 86}
87 87
88static void _rtl92ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
89{
90 struct rtl_priv *rtlpriv = rtl_priv(hw);
91 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
92 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
93 unsigned long flags;
94
95 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
96 while (skb_queue_len(&ring->queue)) {
97 struct rtl_tx_buffer_desc *entry =
98 &ring->buffer_desc[ring->idx];
99 struct sk_buff *skb = __skb_dequeue(&ring->queue);
100
101 pci_unmap_single(rtlpci->pdev,
102 rtlpriv->cfg->ops->get_desc(
103 (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
104 skb->len, PCI_DMA_TODEVICE);
105 kfree_skb(skb);
106 ring->idx = (ring->idx + 1) % ring->entries;
107 }
108 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
109}
110
111static void _rtl92ee_disable_bcn_sub_func(struct ieee80211_hw *hw) 88static void _rtl92ee_disable_bcn_sub_func(struct ieee80211_hw *hw)
112{ 89{
113 _rtl92ee_set_bcn_ctrl_reg(hw, BIT(1), 0); 90 _rtl92ee_set_bcn_ctrl_reg(hw, BIT(1), 0);
@@ -403,9 +380,6 @@ static void _rtl92ee_download_rsvd_page(struct ieee80211_hw *hw)
403 rtl_write_byte(rtlpriv, REG_DWBCN0_CTRL + 2, 380 rtl_write_byte(rtlpriv, REG_DWBCN0_CTRL + 2,
404 bcnvalid_reg | BIT(0)); 381 bcnvalid_reg | BIT(0));
405 382
406 /* Return Beacon TCB */
407 _rtl92ee_return_beacon_queue_skb(hw);
408
409 /* download rsvd page */ 383 /* download rsvd page */
410 rtl92ee_set_fw_rsvdpagepkt(hw, false); 384 rtl92ee_set_fw_rsvdpagepkt(hw, false);
411 385
@@ -1163,6 +1137,139 @@ void rtl92ee_enable_hw_security_config(struct ieee80211_hw *hw)
1163 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value); 1137 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
1164} 1138}
1165 1139
1140static bool _rtl8192ee_check_pcie_dma_hang(struct rtl_priv *rtlpriv)
1141{
1142 u8 tmp;
1143
1144 /* write reg 0x350 Bit[26]=1. Enable debug port. */
1145 tmp = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 3);
1146 if (!(tmp & BIT(2))) {
1147 rtl_write_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 3,
1148 tmp | BIT(2));
1149 mdelay(100); /* Suggested by DD Justin_tsai. */
1150 }
1151
1152 /* read reg 0x350 Bit[25] if 1 : RX hang
1153 * read reg 0x350 Bit[24] if 1 : TX hang
1154 */
1155 tmp = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 3);
1156 if ((tmp & BIT(0)) || (tmp & BIT(1))) {
1157 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1158 "CheckPcieDMAHang8192EE(): true!!\n");
1159 return true;
1160 }
1161 return false;
1162}
1163
1164static void _rtl8192ee_reset_pcie_interface_dma(struct rtl_priv *rtlpriv,
1165 bool mac_power_on)
1166{
1167 u8 tmp;
1168 bool release_mac_rx_pause;
1169 u8 backup_pcie_dma_pause;
1170
1171 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1172 "ResetPcieInterfaceDMA8192EE()\n");
1173
1174 /* Revise Note: Follow the document "PCIe RX DMA Hang Reset Flow_v03"
1175 * released by SD1 Alan.
1176 */
1177
1178 /* 1. disable register write lock
1179 * write 0x1C bit[1:0] = 2'h0
1180 * write 0xCC bit[2] = 1'b1
1181 */
1182 tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL);
1183 tmp &= ~(BIT(1) | BIT(0));
1184 rtl_write_byte(rtlpriv, REG_RSV_CTRL, tmp);
1185 tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2);
1186 tmp |= BIT(2);
1187 rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp);
1188
1189 /* 2. Check and pause TRX DMA
1190 * write 0x284 bit[18] = 1'b1
1191 * write 0x301 = 0xFF
1192 */
1193 tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
1194 if (tmp & BIT(2)) {
1195 /* Already pause before the function for another reason. */
1196 release_mac_rx_pause = false;
1197 } else {
1198 rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, (tmp | BIT(2)));
1199 release_mac_rx_pause = true;
1200 }
1201
1202 backup_pcie_dma_pause = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG + 1);
1203 if (backup_pcie_dma_pause != 0xFF)
1204 rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xFF);
1205
1206 if (mac_power_on) {
1207 /* 3. reset TRX function
1208 * write 0x100 = 0x00
1209 */
1210 rtl_write_byte(rtlpriv, REG_CR, 0);
1211 }
1212
1213 /* 4. Reset PCIe DMA
1214 * write 0x003 bit[0] = 0
1215 */
1216 tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
1217 tmp &= ~(BIT(0));
1218 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp);
1219
1220 /* 5. Enable PCIe DMA
1221 * write 0x003 bit[0] = 1
1222 */
1223 tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
1224 tmp |= BIT(0);
1225 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp);
1226
1227 if (mac_power_on) {
1228 /* 6. enable TRX function
1229 * write 0x100 = 0xFF
1230 */
1231 rtl_write_byte(rtlpriv, REG_CR, 0xFF);
1232
1233 /* We should init LLT & RQPN and
1234 * prepare Tx/Rx descrptor address later
1235 * because MAC function is reset.
1236 */
1237 }
1238
1239 /* 7. Restore PCIe autoload down bit
1240 * write 0xF8 bit[17] = 1'b1
1241 */
1242 tmp = rtl_read_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL + 2);
1243 tmp |= BIT(1);
1244 rtl_write_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL + 2, tmp);
1245
1246 /* In MAC power on state, BB and RF maybe in ON state,
1247 * if we release TRx DMA here
1248 * it will cause packets to be started to Tx/Rx,
1249 * so we release Tx/Rx DMA later.
1250 */
1251 if (!mac_power_on) {
1252 /* 8. release TRX DMA
1253 * write 0x284 bit[18] = 1'b0
1254 * write 0x301 = 0x00
1255 */
1256 if (release_mac_rx_pause) {
1257 tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
1258 rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL,
1259 (tmp & (~BIT(2))));
1260 }
1261 rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1,
1262 backup_pcie_dma_pause);
1263 }
1264
1265 /* 9. lock system register
1266 * write 0xCC bit[2] = 1'b0
1267 */
1268 tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2);
1269 tmp &= ~(BIT(2));
1270 rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp);
1271}
1272
1166int rtl92ee_hw_init(struct ieee80211_hw *hw) 1273int rtl92ee_hw_init(struct ieee80211_hw *hw)
1167{ 1274{
1168 struct rtl_priv *rtlpriv = rtl_priv(hw); 1275 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1188,6 +1295,13 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
1188 rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_92E; 1295 rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_92E;
1189 } 1296 }
1190 1297
1298 if (_rtl8192ee_check_pcie_dma_hang(rtlpriv)) {
1299 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "92ee dma hang!\n");
1300 _rtl8192ee_reset_pcie_interface_dma(rtlpriv,
1301 rtlhal->mac_func_enable);
1302 rtlhal->mac_func_enable = false;
1303 }
1304
1191 rtstatus = _rtl92ee_init_mac(hw); 1305 rtstatus = _rtl92ee_init_mac(hw);
1192 1306
1193 rtl_write_byte(rtlpriv, 0x577, 0x03); 1307 rtl_write_byte(rtlpriv, 0x577, 0x03);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
index 3f2a9596e7cd..1eaa1fab550d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
@@ -77,9 +77,11 @@
77#define REG_HIMRE 0x00B8 77#define REG_HIMRE 0x00B8
78#define REG_HISRE 0x00BC 78#define REG_HISRE 0x00BC
79 79
80#define REG_PMC_DBG_CTRL2 0x00CC
80#define REG_EFUSE_ACCESS 0x00CF 81#define REG_EFUSE_ACCESS 0x00CF
81#define REG_HPON_FSM 0x00EC 82#define REG_HPON_FSM 0x00EC
82#define REG_SYS_CFG1 0x00F0 83#define REG_SYS_CFG1 0x00F0
84#define REG_MAC_PHY_CTRL_NORMAL 0x00F8
83#define REG_SYS_CFG2 0x00FC 85#define REG_SYS_CFG2 0x00FC
84 86
85#define REG_CR 0x0100 87#define REG_CR 0x0100
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
index 9b5a7d5be121..c31c6bfb536d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
@@ -113,8 +113,6 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
113 RCR_HTC_LOC_CTRL | 113 RCR_HTC_LOC_CTRL |
114 RCR_AMF | 114 RCR_AMF |
115 RCR_ACF | 115 RCR_ACF |
116 RCR_ADF |
117 RCR_AICV |
118 RCR_ACRC32 | 116 RCR_ACRC32 |
119 RCR_AB | 117 RCR_AB |
120 RCR_AM | 118 RCR_AM |
@@ -241,6 +239,7 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = {
241 .set_desc = rtl92ee_set_desc, 239 .set_desc = rtl92ee_set_desc,
242 .get_desc = rtl92ee_get_desc, 240 .get_desc = rtl92ee_get_desc,
243 .is_tx_desc_closed = rtl92ee_is_tx_desc_closed, 241 .is_tx_desc_closed = rtl92ee_is_tx_desc_closed,
242 .get_available_desc = rtl92ee_get_available_desc,
244 .tx_polling = rtl92ee_tx_polling, 243 .tx_polling = rtl92ee_tx_polling,
245 .enable_hw_sec = rtl92ee_enable_hw_security_config, 244 .enable_hw_sec = rtl92ee_enable_hw_security_config,
246 .set_key = rtl92ee_set_key, 245 .set_key = rtl92ee_set_key,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
index 2fcbef1d029f..d39ee67f6113 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
@@ -47,164 +47,6 @@ static u8 _rtl92ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
47 return skb->priority; 47 return skb->priority;
48} 48}
49 49
50/* mac80211's rate_idx is like this:
51 *
52 * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
53 *
54 * B/G rate:
55 * (rx_status->flag & RX_FLAG_HT) = 0,
56 * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
57 *
58 * N rate:
59 * (rx_status->flag & RX_FLAG_HT) = 1,
60 * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
61 *
62 * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
63 * A rate:
64 * (rx_status->flag & RX_FLAG_HT) = 0,
65 * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
66 *
67 * N rate:
68 * (rx_status->flag & RX_FLAG_HT) = 1,
69 * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
70 */
71static int _rtl92ee_rate_mapping(struct ieee80211_hw *hw,
72 bool isht, u8 desc_rate)
73{
74 int rate_idx;
75
76 if (!isht) {
77 if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
78 switch (desc_rate) {
79 case DESC92C_RATE1M:
80 rate_idx = 0;
81 break;
82 case DESC92C_RATE2M:
83 rate_idx = 1;
84 break;
85 case DESC92C_RATE5_5M:
86 rate_idx = 2;
87 break;
88 case DESC92C_RATE11M:
89 rate_idx = 3;
90 break;
91 case DESC92C_RATE6M:
92 rate_idx = 4;
93 break;
94 case DESC92C_RATE9M:
95 rate_idx = 5;
96 break;
97 case DESC92C_RATE12M:
98 rate_idx = 6;
99 break;
100 case DESC92C_RATE18M:
101 rate_idx = 7;
102 break;
103 case DESC92C_RATE24M:
104 rate_idx = 8;
105 break;
106 case DESC92C_RATE36M:
107 rate_idx = 9;
108 break;
109 case DESC92C_RATE48M:
110 rate_idx = 10;
111 break;
112 case DESC92C_RATE54M:
113 rate_idx = 11;
114 break;
115 default:
116 rate_idx = 0;
117 break;
118 }
119 } else {
120 switch (desc_rate) {
121 case DESC92C_RATE6M:
122 rate_idx = 0;
123 break;
124 case DESC92C_RATE9M:
125 rate_idx = 1;
126 break;
127 case DESC92C_RATE12M:
128 rate_idx = 2;
129 break;
130 case DESC92C_RATE18M:
131 rate_idx = 3;
132 break;
133 case DESC92C_RATE24M:
134 rate_idx = 4;
135 break;
136 case DESC92C_RATE36M:
137 rate_idx = 5;
138 break;
139 case DESC92C_RATE48M:
140 rate_idx = 6;
141 break;
142 case DESC92C_RATE54M:
143 rate_idx = 7;
144 break;
145 default:
146 rate_idx = 0;
147 break;
148 }
149 }
150 } else {
151 switch (desc_rate) {
152 case DESC92C_RATEMCS0:
153 rate_idx = 0;
154 break;
155 case DESC92C_RATEMCS1:
156 rate_idx = 1;
157 break;
158 case DESC92C_RATEMCS2:
159 rate_idx = 2;
160 break;
161 case DESC92C_RATEMCS3:
162 rate_idx = 3;
163 break;
164 case DESC92C_RATEMCS4:
165 rate_idx = 4;
166 break;
167 case DESC92C_RATEMCS5:
168 rate_idx = 5;
169 break;
170 case DESC92C_RATEMCS6:
171 rate_idx = 6;
172 break;
173 case DESC92C_RATEMCS7:
174 rate_idx = 7;
175 break;
176 case DESC92C_RATEMCS8:
177 rate_idx = 8;
178 break;
179 case DESC92C_RATEMCS9:
180 rate_idx = 9;
181 break;
182 case DESC92C_RATEMCS10:
183 rate_idx = 10;
184 break;
185 case DESC92C_RATEMCS11:
186 rate_idx = 11;
187 break;
188 case DESC92C_RATEMCS12:
189 rate_idx = 12;
190 break;
191 case DESC92C_RATEMCS13:
192 rate_idx = 13;
193 break;
194 case DESC92C_RATEMCS14:
195 rate_idx = 14;
196 break;
197 case DESC92C_RATEMCS15:
198 rate_idx = 15;
199 break;
200 default:
201 rate_idx = 0;
202 break;
203 }
204 }
205 return rate_idx;
206}
207
208static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw, 50static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
209 struct rtl_stats *pstatus, u8 *pdesc, 51 struct rtl_stats *pstatus, u8 *pdesc,
210 struct rx_fwinfo *p_drvinfo, 52 struct rx_fwinfo *p_drvinfo,
@@ -345,8 +187,8 @@ static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
345 pstatus->recvsignalpower = rx_pwr_all; 187 pstatus->recvsignalpower = rx_pwr_all;
346 188
347 /* (3)EVM of HT rate */ 189 /* (3)EVM of HT rate */
348 if (pstatus->rate >= DESC92C_RATEMCS8 && 190 if (pstatus->rate >= DESC_RATEMCS8 &&
349 pstatus->rate <= DESC92C_RATEMCS15) 191 pstatus->rate <= DESC_RATEMCS15)
350 max_spatial_stream = 2; 192 max_spatial_stream = 2;
351 else 193 else
352 max_spatial_stream = 1; 194 max_spatial_stream = 1;
@@ -512,6 +354,10 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
512 struct ieee80211_hdr *hdr; 354 struct ieee80211_hdr *hdr;
513 u32 phystatus = GET_RX_DESC_PHYST(pdesc); 355 u32 phystatus = GET_RX_DESC_PHYST(pdesc);
514 356
357 if (GET_RX_STATUS_DESC_RPT_SEL(pdesc) == 0)
358 status->packet_report_type = NORMAL_RX;
359 else
360 status->packet_report_type = C2H_PACKET;
515 status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc); 361 status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
516 status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) * 362 status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
517 RX_DRV_INFO_SIZE_UNIT; 363 RX_DRV_INFO_SIZE_UNIT;
@@ -576,9 +422,8 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
576 * are use (RX_FLAG_HT) 422 * are use (RX_FLAG_HT)
577 * Notice: this is diff with windows define 423 * Notice: this is diff with windows define
578 */ 424 */
579 rx_status->rate_idx = _rtl92ee_rate_mapping(hw, 425 rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
580 status->is_ht, 426 false, status->rate);
581 status->rate);
582 427
583 rx_status->mactime = status->timestamp_low; 428 rx_status->mactime = status->timestamp_low;
584 if (phystatus) { 429 if (phystatus) {
@@ -654,14 +499,7 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index)
654 if (!start_rx) 499 if (!start_rx)
655 return 0; 500 return 0;
656 501
657 if ((last_read_point > (RX_DESC_NUM_92E / 2)) && 502 remind_cnt = calc_fifo_space(read_point, write_point);
658 (read_point <= (RX_DESC_NUM_92E / 2))) {
659 remind_cnt = RX_DESC_NUM_92E - write_point;
660 } else {
661 remind_cnt = (read_point >= write_point) ?
662 (read_point - write_point) :
663 (RX_DESC_NUM_92E - write_point + read_point);
664 }
665 503
666 if (remind_cnt == 0) 504 if (remind_cnt == 0)
667 return 0; 505 return 0;
@@ -710,7 +548,7 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index)
710 return desc_address; 548 return desc_address;
711} 549}
712 550
713void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) 551u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
714{ 552{
715 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 553 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
716 struct rtl_priv *rtlpriv = rtl_priv(hw); 554 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -723,12 +561,11 @@ void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
723 current_tx_read_point = (u16)((tmp_4byte >> 16) & 0x0fff); 561 current_tx_read_point = (u16)((tmp_4byte >> 16) & 0x0fff);
724 current_tx_write_point = (u16)((tmp_4byte) & 0x0fff); 562 current_tx_write_point = (u16)((tmp_4byte) & 0x0fff);
725 563
726 point_diff = ((current_tx_read_point > current_tx_write_point) ? 564 point_diff = calc_fifo_space(current_tx_read_point,
727 (current_tx_read_point - current_tx_write_point) : 565 current_tx_write_point);
728 (TX_DESC_NUM_92E - current_tx_write_point +
729 current_tx_read_point));
730 566
731 rtlpci->tx_ring[q_idx].avl_desc = point_diff; 567 rtlpci->tx_ring[q_idx].avl_desc = point_diff;
568 return point_diff;
732} 569}
733 570
734void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw, 571void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
@@ -901,13 +738,13 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
901 } else { 738 } else {
902 if (rtlpriv->ra.is_special_data) { 739 if (rtlpriv->ra.is_special_data) {
903 ptcb_desc->use_driver_rate = true; 740 ptcb_desc->use_driver_rate = true;
904 SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE11M); 741 SET_TX_DESC_TX_RATE(pdesc, DESC_RATE11M);
905 } else { 742 } else {
906 ptcb_desc->use_driver_rate = false; 743 ptcb_desc->use_driver_rate = false;
907 } 744 }
908 } 745 }
909 746
910 if (ptcb_desc->hw_rate > DESC92C_RATEMCS0) 747 if (ptcb_desc->hw_rate > DESC_RATEMCS0)
911 short_gi = (ptcb_desc->use_shortgi) ? 1 : 0; 748 short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
912 else 749 else
913 short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0; 750 short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
@@ -927,7 +764,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
927 SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate); 764 SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
928 SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc); 765 SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
929 SET_TX_DESC_RTS_SHORT(pdesc, 766 SET_TX_DESC_RTS_SHORT(pdesc,
930 ((ptcb_desc->rts_rate <= DESC92C_RATE54M) ? 767 ((ptcb_desc->rts_rate <= DESC_RATE54M) ?
931 (ptcb_desc->rts_use_shortpreamble ? 1 : 0) : 768 (ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
932 (ptcb_desc->rts_use_shortgi ? 1 : 0))); 769 (ptcb_desc->rts_use_shortgi ? 1 : 0)));
933 770
@@ -1038,7 +875,7 @@ void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
1038 if (firstseg) 875 if (firstseg)
1039 SET_TX_DESC_OFFSET(pdesc, txdesc_len); 876 SET_TX_DESC_OFFSET(pdesc, txdesc_len);
1040 877
1041 SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M); 878 SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
1042 879
1043 SET_TX_DESC_SEQ(pdesc, 0); 880 SET_TX_DESC_SEQ(pdesc, 0);
1044 881
@@ -1207,8 +1044,7 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
1207 static u8 stop_report_cnt; 1044 static u8 stop_report_cnt;
1208 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; 1045 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
1209 1046
1210 /*checking Read/Write Point each interrupt wastes CPU */ 1047 {
1211 if (stop_report_cnt > 15 || !rtlpriv->link_info.busytraffic) {
1212 u16 point_diff = 0; 1048 u16 point_diff = 0;
1213 u16 cur_tx_rp, cur_tx_wp; 1049 u16 cur_tx_rp, cur_tx_wp;
1214 u32 tmpu32 = 0; 1050 u32 tmpu32 = 0;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
index 6f9be1c7515c..8f78ac9e6040 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
@@ -542,6 +542,8 @@
542 LE_BITS_TO_4BYTE(__pdesc+8, 12, 4) 542 LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
543#define GET_RX_DESC_RX_IS_QOS(__pdesc) \ 543#define GET_RX_DESC_RX_IS_QOS(__pdesc) \
544 LE_BITS_TO_4BYTE(__pdesc+8, 16, 1) 544 LE_BITS_TO_4BYTE(__pdesc+8, 16, 1)
545#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \
546 LE_BITS_TO_4BYTE(__pdesc+8, 28, 1)
545 547
546#define GET_RX_DESC_RXMCS(__pdesc) \ 548#define GET_RX_DESC_RXMCS(__pdesc) \
547 LE_BITS_TO_4BYTE(__pdesc+12, 0, 7) 549 LE_BITS_TO_4BYTE(__pdesc+12, 0, 7)
@@ -591,10 +593,10 @@ do { \
591} while (0) 593} while (0)
592 594
593#define RTL92EE_RX_HAL_IS_CCK_RATE(rxmcs)\ 595#define RTL92EE_RX_HAL_IS_CCK_RATE(rxmcs)\
594 (rxmcs == DESC92C_RATE1M ||\ 596 (rxmcs == DESC_RATE1M ||\
595 rxmcs == DESC92C_RATE2M ||\ 597 rxmcs == DESC_RATE2M ||\
596 rxmcs == DESC92C_RATE5_5M ||\ 598 rxmcs == DESC_RATE5_5M ||\
597 rxmcs == DESC92C_RATE11M) 599 rxmcs == DESC_RATE11M)
598 600
599#define IS_LITTLE_ENDIAN 1 601#define IS_LITTLE_ENDIAN 1
600 602
@@ -829,7 +831,7 @@ void rtl92ee_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc,
829 u8 queue_index); 831 u8 queue_index);
830u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, 832u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw,
831 u8 queue_index); 833 u8 queue_index);
832void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index); 834u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index);
833void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw, 835void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
834 u8 *tx_bd_desc, u8 *desc, u8 queue_index, 836 u8 *tx_bd_desc, u8 *desc, u8 queue_index,
835 struct sk_buff *skb, dma_addr_t addr); 837 struct sk_buff *skb, dma_addr_t addr);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
index 6e7a70b43949..ef87c09b77d0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
@@ -450,10 +450,10 @@
450 SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32) 450 SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32)
451 451
452#define SE_RX_HAL_IS_CCK_RATE(_pdesc)\ 452#define SE_RX_HAL_IS_CCK_RATE(_pdesc)\
453 (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE1M || \ 453 (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE1M || \
454 GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE2M || \ 454 GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE2M || \
455 GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE5_5M ||\ 455 GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE5_5M ||\
456 GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE11M) 456 GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC_RATE11M)
457 457
458enum rf_optype { 458enum rf_optype {
459 RF_OP_BY_SW_3WIRE = 0, 459 RF_OP_BY_SW_3WIRE = 0,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index b3a2d5ec59e6..575980b88658 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -29,6 +29,7 @@
29 29
30#include "../wifi.h" 30#include "../wifi.h"
31#include "../base.h" 31#include "../base.h"
32#include "../core.h"
32#include "reg.h" 33#include "reg.h"
33#include "def.h" 34#include "def.h"
34#include "phy.h" 35#include "phy.h"
@@ -469,7 +470,7 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
469 if (digtable->backoff_enable_flag) 470 if (digtable->backoff_enable_flag)
470 rtl92s_backoff_enable_flag(hw); 471 rtl92s_backoff_enable_flag(hw);
471 else 472 else
472 digtable->back_val = DM_DIG_BACKOFF; 473 digtable->back_val = DM_DIG_BACKOFF_MAX;
473 474
474 if ((digtable->rssi_val + 10 - digtable->back_val) > 475 if ((digtable->rssi_val + 10 - digtable->back_val) >
475 digtable->rx_gain_max) 476 digtable->rx_gain_max)
@@ -503,7 +504,7 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
503 digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 504 digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
504 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE); 505 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE);
505 506
506 digtable->back_val = DM_DIG_BACKOFF; 507 digtable->back_val = DM_DIG_BACKOFF_MAX;
507 digtable->cur_igvalue = rtlpriv->phy.default_initialgain[0]; 508 digtable->cur_igvalue = rtlpriv->phy.default_initialgain[0];
508 digtable->pre_igvalue = 0; 509 digtable->pre_igvalue = 0;
509 return; 510 return;
@@ -691,7 +692,7 @@ static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw)
691 692
692 /* for dig debug rssi value */ 693 /* for dig debug rssi value */
693 digtable->rssi_val = 50; 694 digtable->rssi_val = 50;
694 digtable->back_val = DM_DIG_BACKOFF; 695 digtable->back_val = DM_DIG_BACKOFF_MAX;
695 digtable->rx_gain_max = DM_DIG_MAX; 696 digtable->rx_gain_max = DM_DIG_MAX;
696 697
697 digtable->rx_gain_min = DM_DIG_MIN; 698 digtable->rx_gain_min = DM_DIG_MIN;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
index 2e9052c8fe4b..de6ac796c74d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
@@ -54,24 +54,6 @@ enum dm_dig_sta {
54 DM_STA_DIG_MAX 54 DM_STA_DIG_MAX
55}; 55};
56 56
57enum dm_dig_connect {
58 DIG_STA_DISCONNECT = 0,
59 DIG_STA_CONNECT = 1,
60 DIG_STA_BEFORE_CONNECT = 2,
61 DIG_AP_DISCONNECT = 3,
62 DIG_AP_CONNECT = 4,
63 DIG_AP_ADD_STATION = 5,
64 DIG_CONNECT_MAX
65};
66
67enum dm_dig_ext_port_alg {
68 DIG_EXT_PORT_STAGE_0 = 0,
69 DIG_EXT_PORT_STAGE_1 = 1,
70 DIG_EXT_PORT_STAGE_2 = 2,
71 DIG_EXT_PORT_STAGE_3 = 3,
72 DIG_EXT_PORT_STAGE_MAX = 4,
73};
74
75enum dm_ratr_sta { 57enum dm_ratr_sta {
76 DM_RATR_STA_HIGH = 0, 58 DM_RATR_STA_HIGH = 0,
77 DM_RATR_STA_MIDDLEHIGH = 1, 59 DM_RATR_STA_MIDDLEHIGH = 1,
@@ -99,22 +81,12 @@ enum dm_ratr_sta {
99#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 81#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
100#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 82#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
101 83
102#define DM_DIG_THRESH_HIGH 40
103#define DM_DIG_THRESH_LOW 35
104#define DM_FALSEALARM_THRESH_LOW 40
105#define DM_FALSEALARM_THRESH_HIGH 1000
106#define DM_DIG_HIGH_PWR_THRESH_HIGH 75 84#define DM_DIG_HIGH_PWR_THRESH_HIGH 75
107#define DM_DIG_HIGH_PWR_THRESH_LOW 70 85#define DM_DIG_HIGH_PWR_THRESH_LOW 70
108#define DM_DIG_BACKOFF 12
109#define DM_DIG_MAX 0x3e
110#define DM_DIG_MIN 0x1c
111#define DM_DIG_MIN_Netcore 0x12 86#define DM_DIG_MIN_Netcore 0x12
112#define DM_DIG_BACKOFF_MAX 12
113#define DM_DIG_BACKOFF_MIN -4
114 87
115void rtl92s_dm_watchdog(struct ieee80211_hw *hw); 88void rtl92s_dm_watchdog(struct ieee80211_hw *hw);
116void rtl92s_dm_init(struct ieee80211_hw *hw); 89void rtl92s_dm_init(struct ieee80211_hw *hw);
117void rtl92s_dm_init_edca_turbo(struct ieee80211_hw *hw); 90void rtl92s_dm_init_edca_turbo(struct ieee80211_hw *hw);
118 91
119#endif 92#endif
120
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index fb003868bdef..e1fd27c888bf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -383,21 +383,21 @@ static struct rtl_hal_cfg rtl92se_hal_cfg = {
383 .maps[RTL_IMR_ROK] = IMR_ROK, 383 .maps[RTL_IMR_ROK] = IMR_ROK,
384 .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER), 384 .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
385 385
386 .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M, 386 .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
387 .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M, 387 .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
388 .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M, 388 .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
389 .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M, 389 .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
390 .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M, 390 .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
391 .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M, 391 .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
392 .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M, 392 .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
393 .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M, 393 .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
394 .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M, 394 .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
395 .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M, 395 .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
396 .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M, 396 .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
397 .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M, 397 .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
398 398
399 .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7, 399 .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
400 .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15, 400 .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
401}; 401};
402 402
403static struct pci_device_id rtl92se_pci_ids[] = { 403static struct pci_device_id rtl92se_pci_ids[] = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 672fd3b02835..125b29bd2f93 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -191,8 +191,8 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
191 pstats->rxpower = rx_pwr_all; 191 pstats->rxpower = rx_pwr_all;
192 pstats->recvsignalpower = rx_pwr_all; 192 pstats->recvsignalpower = rx_pwr_all;
193 193
194 if (pstats->is_ht && pstats->rate >= DESC92_RATEMCS8 && 194 if (pstats->is_ht && pstats->rate >= DESC_RATEMCS8 &&
195 pstats->rate <= DESC92_RATEMCS15) 195 pstats->rate <= DESC_RATEMCS15)
196 max_spatial_stream = 2; 196 max_spatial_stream = 2;
197 else 197 else
198 max_spatial_stream = 1; 198 max_spatial_stream = 1;
@@ -264,7 +264,6 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
264 struct rx_fwinfo *p_drvinfo; 264 struct rx_fwinfo *p_drvinfo;
265 u32 phystatus = (u32)GET_RX_STATUS_DESC_PHY_STATUS(pdesc); 265 u32 phystatus = (u32)GET_RX_STATUS_DESC_PHY_STATUS(pdesc);
266 struct ieee80211_hdr *hdr; 266 struct ieee80211_hdr *hdr;
267 bool first_ampdu = false;
268 267
269 stats->length = (u16)GET_RX_STATUS_DESC_PKT_LEN(pdesc); 268 stats->length = (u16)GET_RX_STATUS_DESC_PKT_LEN(pdesc);
270 stats->rx_drvinfo_size = (u8)GET_RX_STATUS_DESC_DRVINFO_SIZE(pdesc) * 8; 269 stats->rx_drvinfo_size = (u8)GET_RX_STATUS_DESC_DRVINFO_SIZE(pdesc) * 8;
@@ -319,8 +318,8 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
319 rx_status->flag |= RX_FLAG_DECRYPTED; 318 rx_status->flag |= RX_FLAG_DECRYPTED;
320 } 319 }
321 320
322 rx_status->rate_idx = rtlwifi_rate_mapping(hw, 321 rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
323 stats->is_ht, stats->rate, first_ampdu); 322 false, stats->rate);
324 323
325 rx_status->mactime = stats->timestamp_low; 324 rx_status->mactime = stats->timestamp_low;
326 if (phystatus) { 325 if (phystatus) {
@@ -394,14 +393,14 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
394 SET_TX_DESC_RSVD_MACID(pdesc, reserved_macid); 393 SET_TX_DESC_RSVD_MACID(pdesc, reserved_macid);
395 394
396 SET_TX_DESC_TXHT(pdesc, ((ptcb_desc->hw_rate >= 395 SET_TX_DESC_TXHT(pdesc, ((ptcb_desc->hw_rate >=
397 DESC92_RATEMCS0) ? 1 : 0)); 396 DESC_RATEMCS0) ? 1 : 0));
398 397
399 if (rtlhal->version == VERSION_8192S_ACUT) { 398 if (rtlhal->version == VERSION_8192S_ACUT) {
400 if (ptcb_desc->hw_rate == DESC92_RATE1M || 399 if (ptcb_desc->hw_rate == DESC_RATE1M ||
401 ptcb_desc->hw_rate == DESC92_RATE2M || 400 ptcb_desc->hw_rate == DESC_RATE2M ||
402 ptcb_desc->hw_rate == DESC92_RATE5_5M || 401 ptcb_desc->hw_rate == DESC_RATE5_5M ||
403 ptcb_desc->hw_rate == DESC92_RATE11M) { 402 ptcb_desc->hw_rate == DESC_RATE11M) {
404 ptcb_desc->hw_rate = DESC92_RATE12M; 403 ptcb_desc->hw_rate = DESC_RATE12M;
405 } 404 }
406 } 405 }
407 406
@@ -430,7 +429,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
430 SET_TX_DESC_RTS_BANDWIDTH(pdesc, 0); 429 SET_TX_DESC_RTS_BANDWIDTH(pdesc, 0);
431 SET_TX_DESC_RTS_SUB_CARRIER(pdesc, ptcb_desc->rts_sc); 430 SET_TX_DESC_RTS_SUB_CARRIER(pdesc, ptcb_desc->rts_sc);
432 SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <= 431 SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
433 DESC92_RATE54M) ? 432 DESC_RATE54M) ?
434 (ptcb_desc->rts_use_shortpreamble ? 1 : 0) 433 (ptcb_desc->rts_use_shortpreamble ? 1 : 0)
435 : (ptcb_desc->rts_use_shortgi ? 1 : 0))); 434 : (ptcb_desc->rts_use_shortgi ? 1 : 0)));
436 435
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
index a0e86922780a..4c1c96c96a5a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
@@ -26,6 +26,7 @@
26#include "../wifi.h" 26#include "../wifi.h"
27#include "../base.h" 27#include "../base.h"
28#include "../pci.h" 28#include "../pci.h"
29#include "../core.h"
29#include "reg.h" 30#include "reg.h"
30#include "def.h" 31#include "def.h"
31#include "phy.h" 32#include "phy.h"
@@ -146,31 +147,6 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
146 {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} 147 {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
147}; 148};
148 149
149static void rtl8723e_dm_diginit(struct ieee80211_hw *hw)
150{
151 struct rtl_priv *rtlpriv = rtl_priv(hw);
152 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
153
154 dm_digtable->dig_enable_flag = true;
155 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
156 dm_digtable->cur_igvalue = 0x20;
157 dm_digtable->pre_igvalue = 0x0;
158 dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
159 dm_digtable->presta_cstate = DIG_STA_DISCONNECT;
160 dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
161 dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
162 dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
163 dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
164 dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
165 dm_digtable->rx_gain_max = DM_DIG_MAX;
166 dm_digtable->rx_gain_min = DM_DIG_MIN;
167 dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
168 dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
169 dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
170 dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
171 dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
172}
173
174static u8 rtl8723e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) 150static u8 rtl8723e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
175{ 151{
176 struct rtl_priv *rtlpriv = rtl_priv(hw); 152 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -395,30 +371,30 @@ static void rtl8723e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
395 if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) { 371 if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
396 dm_digtable->rssi_val_min = rtl8723e_dm_initial_gain_min_pwdb(hw); 372 dm_digtable->rssi_val_min = rtl8723e_dm_initial_gain_min_pwdb(hw);
397 373
398 if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) { 374 if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
399 if (dm_digtable->rssi_val_min <= 25) 375 if (dm_digtable->rssi_val_min <= 25)
400 dm_digtable->cur_cck_pd_state = 376 dm_digtable->cur_cck_pd_state =
401 CCK_PD_STAGE_LowRssi; 377 CCK_PD_STAGE_LOWRSSI;
402 else 378 else
403 dm_digtable->cur_cck_pd_state = 379 dm_digtable->cur_cck_pd_state =
404 CCK_PD_STAGE_HighRssi; 380 CCK_PD_STAGE_HIGHRSSI;
405 } else { 381 } else {
406 if (dm_digtable->rssi_val_min <= 20) 382 if (dm_digtable->rssi_val_min <= 20)
407 dm_digtable->cur_cck_pd_state = 383 dm_digtable->cur_cck_pd_state =
408 CCK_PD_STAGE_LowRssi; 384 CCK_PD_STAGE_LOWRSSI;
409 else 385 else
410 dm_digtable->cur_cck_pd_state = 386 dm_digtable->cur_cck_pd_state =
411 CCK_PD_STAGE_HighRssi; 387 CCK_PD_STAGE_HIGHRSSI;
412 } 388 }
413 } else { 389 } else {
414 dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX; 390 dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
415 } 391 }
416 392
417 if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) { 393 if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) {
418 if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) { 394 if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
419 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800) 395 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
420 dm_digtable->cur_cck_fa_state = 396 dm_digtable->cur_cck_fa_state =
421 CCK_FA_STAGE_High; 397 CCK_FA_STAGE_HIGH;
422 else 398 else
423 dm_digtable->cur_cck_fa_state = 399 dm_digtable->cur_cck_fa_state =
424 CCK_FA_STAGE_LOW; 400 CCK_FA_STAGE_LOW;
@@ -818,7 +794,7 @@ void rtl8723e_dm_init(struct ieee80211_hw *hw)
818 struct rtl_priv *rtlpriv = rtl_priv(hw); 794 struct rtl_priv *rtlpriv = rtl_priv(hw);
819 795
820 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; 796 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
821 rtl8723e_dm_diginit(hw); 797 rtl_dm_diginit(hw, 0x20);
822 rtl8723_dm_init_dynamic_txpower(hw); 798 rtl8723_dm_init_dynamic_txpower(hw);
823 rtl8723_dm_init_edca_turbo(hw); 799 rtl8723_dm_init_edca_turbo(hw);
824 rtl8723e_dm_init_rate_adaptive_mask(hw); 800 rtl8723e_dm_init_rate_adaptive_mask(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
index 6fa0feb05f6d..57111052e86b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
@@ -42,25 +42,12 @@
42#define BW_AUTO_SWITCH_HIGH_LOW 25 42#define BW_AUTO_SWITCH_HIGH_LOW 25
43#define BW_AUTO_SWITCH_LOW_HIGH 30 43#define BW_AUTO_SWITCH_LOW_HIGH 30
44 44
45#define DM_DIG_THRESH_HIGH 40
46#define DM_DIG_THRESH_LOW 35
47
48#define DM_FALSEALARM_THRESH_LOW 400
49#define DM_FALSEALARM_THRESH_HIGH 1000
50
51#define DM_DIG_MAX 0x3e
52#define DM_DIG_MIN 0x1e
53
54#define DM_DIG_FA_UPPER 0x32 45#define DM_DIG_FA_UPPER 0x32
55#define DM_DIG_FA_LOWER 0x20 46#define DM_DIG_FA_LOWER 0x20
56#define DM_DIG_FA_TH0 0x20 47#define DM_DIG_FA_TH0 0x20
57#define DM_DIG_FA_TH1 0x100 48#define DM_DIG_FA_TH1 0x100
58#define DM_DIG_FA_TH2 0x200 49#define DM_DIG_FA_TH2 0x200
59 50
60#define DM_DIG_BACKOFF_MAX 12
61#define DM_DIG_BACKOFF_MIN -4
62#define DM_DIG_BACKOFF_DEFAULT 10
63
64#define RXPATHSELECTION_SS_TH_LOW 30 51#define RXPATHSELECTION_SS_TH_LOW 30
65#define RXPATHSELECTION_DIFF_TH 18 52#define RXPATHSELECTION_DIFF_TH 18
66 53
@@ -108,14 +95,6 @@ enum tag_dynamic_init_gain_operation_type_definition {
108 DIG_OP_TYPE_MAX 95 DIG_OP_TYPE_MAX
109}; 96};
110 97
111enum tag_cck_packet_detection_threshold_type_definition {
112 CCK_PD_STAGE_LowRssi = 0,
113 CCK_PD_STAGE_HighRssi = 1,
114 CCK_FA_STAGE_LOW = 2,
115 CCK_FA_STAGE_High = 3,
116 CCK_PD_STAGE_MAX = 4,
117};
118
119enum dm_1r_cca_e { 98enum dm_1r_cca_e {
120 CCA_1R = 0, 99 CCA_1R = 0,
121 CCA_2R = 1, 100 CCA_2R = 1,
@@ -134,23 +113,6 @@ enum dm_sw_ant_switch_e {
134 ANS_ANTENNA_MAX = 3, 113 ANS_ANTENNA_MAX = 3,
135}; 114};
136 115
137enum dm_dig_ext_port_alg_e {
138 DIG_EXT_PORT_STAGE_0 = 0,
139 DIG_EXT_PORT_STAGE_1 = 1,
140 DIG_EXT_PORT_STAGE_2 = 2,
141 DIG_EXT_PORT_STAGE_3 = 3,
142 DIG_EXT_PORT_STAGE_MAX = 4,
143};
144
145enum dm_dig_connect_e {
146 DIG_STA_DISCONNECT = 0,
147 DIG_STA_CONNECT = 1,
148 DIG_STA_BEFORE_CONNECT = 2,
149 DIG_MULTISTA_DISCONNECT = 3,
150 DIG_MULTISTA_CONNECT = 4,
151 DIG_CONNECT_MAX
152};
153
154#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1) 116#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
155#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1) 117#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
156#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1) 118#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index d372ccaf3465..2f7c144d7980 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -45,164 +45,6 @@ static u8 _rtl8723e_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
45 return skb->priority; 45 return skb->priority;
46} 46}
47 47
48/* mac80211's rate_idx is like this:
49 *
50 * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
51 *
52 * B/G rate:
53 * (rx_status->flag & RX_FLAG_HT) = 0,
54 * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
55 *
56 * N rate:
57 * (rx_status->flag & RX_FLAG_HT) = 1,
58 * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
59 *
60 * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
61 * A rate:
62 * (rx_status->flag & RX_FLAG_HT) = 0,
63 * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
64 *
65 * N rate:
66 * (rx_status->flag & RX_FLAG_HT) = 1,
67 * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
68 */
69static int _rtl8723e_rate_mapping(struct ieee80211_hw *hw,
70 bool isht, u8 desc_rate)
71{
72 int rate_idx;
73
74 if (!isht) {
75 if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
76 switch (desc_rate) {
77 case DESC92C_RATE1M:
78 rate_idx = 0;
79 break;
80 case DESC92C_RATE2M:
81 rate_idx = 1;
82 break;
83 case DESC92C_RATE5_5M:
84 rate_idx = 2;
85 break;
86 case DESC92C_RATE11M:
87 rate_idx = 3;
88 break;
89 case DESC92C_RATE6M:
90 rate_idx = 4;
91 break;
92 case DESC92C_RATE9M:
93 rate_idx = 5;
94 break;
95 case DESC92C_RATE12M:
96 rate_idx = 6;
97 break;
98 case DESC92C_RATE18M:
99 rate_idx = 7;
100 break;
101 case DESC92C_RATE24M:
102 rate_idx = 8;
103 break;
104 case DESC92C_RATE36M:
105 rate_idx = 9;
106 break;
107 case DESC92C_RATE48M:
108 rate_idx = 10;
109 break;
110 case DESC92C_RATE54M:
111 rate_idx = 11;
112 break;
113 default:
114 rate_idx = 0;
115 break;
116 }
117 } else {
118 switch (desc_rate) {
119 case DESC92C_RATE6M:
120 rate_idx = 0;
121 break;
122 case DESC92C_RATE9M:
123 rate_idx = 1;
124 break;
125 case DESC92C_RATE12M:
126 rate_idx = 2;
127 break;
128 case DESC92C_RATE18M:
129 rate_idx = 3;
130 break;
131 case DESC92C_RATE24M:
132 rate_idx = 4;
133 break;
134 case DESC92C_RATE36M:
135 rate_idx = 5;
136 break;
137 case DESC92C_RATE48M:
138 rate_idx = 6;
139 break;
140 case DESC92C_RATE54M:
141 rate_idx = 7;
142 break;
143 default:
144 rate_idx = 0;
145 break;
146 }
147 }
148 } else {
149 switch (desc_rate) {
150 case DESC92C_RATEMCS0:
151 rate_idx = 0;
152 break;
153 case DESC92C_RATEMCS1:
154 rate_idx = 1;
155 break;
156 case DESC92C_RATEMCS2:
157 rate_idx = 2;
158 break;
159 case DESC92C_RATEMCS3:
160 rate_idx = 3;
161 break;
162 case DESC92C_RATEMCS4:
163 rate_idx = 4;
164 break;
165 case DESC92C_RATEMCS5:
166 rate_idx = 5;
167 break;
168 case DESC92C_RATEMCS6:
169 rate_idx = 6;
170 break;
171 case DESC92C_RATEMCS7:
172 rate_idx = 7;
173 break;
174 case DESC92C_RATEMCS8:
175 rate_idx = 8;
176 break;
177 case DESC92C_RATEMCS9:
178 rate_idx = 9;
179 break;
180 case DESC92C_RATEMCS10:
181 rate_idx = 10;
182 break;
183 case DESC92C_RATEMCS11:
184 rate_idx = 11;
185 break;
186 case DESC92C_RATEMCS12:
187 rate_idx = 12;
188 break;
189 case DESC92C_RATEMCS13:
190 rate_idx = 13;
191 break;
192 case DESC92C_RATEMCS14:
193 rate_idx = 14;
194 break;
195 case DESC92C_RATEMCS15:
196 rate_idx = 15;
197 break;
198 default:
199 rate_idx = 0;
200 break;
201 }
202 }
203 return rate_idx;
204}
205
206static void _rtl8723e_query_rxphystatus(struct ieee80211_hw *hw, 48static void _rtl8723e_query_rxphystatus(struct ieee80211_hw *hw,
207 struct rtl_stats *pstatus, u8 *pdesc, 49 struct rtl_stats *pstatus, u8 *pdesc,
208 struct rx_fwinfo_8723e *p_drvinfo, 50 struct rx_fwinfo_8723e *p_drvinfo,
@@ -503,8 +345,8 @@ bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw,
503 * are use (RX_FLAG_HT) 345 * are use (RX_FLAG_HT)
504 * Notice: this is diff with windows define 346 * Notice: this is diff with windows define
505 */ 347 */
506 rx_status->rate_idx = _rtl8723e_rate_mapping(hw, 348 rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
507 status->is_ht, status->rate); 349 false, status->rate);
508 350
509 rx_status->mactime = status->timestamp_low; 351 rx_status->mactime = status->timestamp_low;
510 if (phystatus == true) { 352 if (phystatus == true) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
index dd7eb4371f49..2367e8f47a5b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
@@ -26,6 +26,7 @@
26#include "../wifi.h" 26#include "../wifi.h"
27#include "../base.h" 27#include "../base.h"
28#include "../pci.h" 28#include "../pci.h"
29#include "../core.h"
29#include "reg.h" 30#include "reg.h"
30#include "def.h" 31#include "def.h"
31#include "phy.h" 32#include "phy.h"
@@ -211,35 +212,6 @@ void rtl8723be_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type,
211 (pwr_val << 16) | (pwr_val << 24); 212 (pwr_val << 16) | (pwr_val << 24);
212} 213}
213 214
214static void rtl8723be_dm_diginit(struct ieee80211_hw *hw)
215{
216 struct rtl_priv *rtlpriv = rtl_priv(hw);
217 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
218
219 dm_digtable->dig_enable_flag = true;
220 dm_digtable->cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
221 dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
222 dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
223 dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
224 dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
225 dm_digtable->rx_gain_max = DM_DIG_MAX;
226 dm_digtable->rx_gain_min = DM_DIG_MIN;
227 dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
228 dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
229 dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
230 dm_digtable->pre_cck_cca_thres = 0xff;
231 dm_digtable->cur_cck_cca_thres = 0x83;
232 dm_digtable->forbidden_igi = DM_DIG_MIN;
233 dm_digtable->large_fa_hit = 0;
234 dm_digtable->recover_cnt = 0;
235 dm_digtable->dig_dynamic_min = DM_DIG_MIN;
236 dm_digtable->dig_dynamic_min_1 = DM_DIG_MIN;
237 dm_digtable->media_connect_0 = false;
238 dm_digtable->media_connect_1 = false;
239 rtlpriv->dm.dm_initialgain_enable = true;
240 dm_digtable->bt30_cur_igi = 0x32;
241}
242
243void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) 215void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
244{ 216{
245 struct rtl_priv *rtlpriv = rtl_priv(hw); 217 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -293,9 +265,10 @@ static void rtl8723be_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw)
293void rtl8723be_dm_init(struct ieee80211_hw *hw) 265void rtl8723be_dm_init(struct ieee80211_hw *hw)
294{ 266{
295 struct rtl_priv *rtlpriv = rtl_priv(hw); 267 struct rtl_priv *rtlpriv = rtl_priv(hw);
268 u32 cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
296 269
297 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; 270 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
298 rtl8723be_dm_diginit(hw); 271 rtl_dm_diginit(hw, cur_igvalue);
299 rtl8723be_dm_init_rate_adaptive_mask(hw); 272 rtl8723be_dm_init_rate_adaptive_mask(hw);
300 rtl8723_dm_init_edca_turbo(hw); 273 rtl8723_dm_init_edca_turbo(hw);
301 rtl8723_dm_init_dynamic_bb_powersaving(hw); 274 rtl8723_dm_init_dynamic_bb_powersaving(hw);
@@ -424,7 +397,7 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
424 struct rtl_priv *rtlpriv = rtl_priv(hw); 397 struct rtl_priv *rtlpriv = rtl_priv(hw);
425 struct dig_t *dm_digtable = &rtlpriv->dm_digtable; 398 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
426 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 399 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
427 u8 dig_dynamic_min, dig_maxofmin; 400 u8 dig_min_0, dig_maxofmin;
428 bool bfirstconnect, bfirstdisconnect; 401 bool bfirstconnect, bfirstdisconnect;
429 u8 dm_dig_max, dm_dig_min; 402 u8 dm_dig_max, dm_dig_min;
430 u8 current_igi = dm_digtable->cur_igvalue; 403 u8 current_igi = dm_digtable->cur_igvalue;
@@ -434,7 +407,7 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
434 if (mac->act_scanning) 407 if (mac->act_scanning)
435 return; 408 return;
436 409
437 dig_dynamic_min = dm_digtable->dig_dynamic_min; 410 dig_min_0 = dm_digtable->dig_min_0;
438 bfirstconnect = (mac->link_state >= MAC80211_LINKED) && 411 bfirstconnect = (mac->link_state >= MAC80211_LINKED) &&
439 !dm_digtable->media_connect_0; 412 !dm_digtable->media_connect_0;
440 bfirstdisconnect = (mac->link_state < MAC80211_LINKED) && 413 bfirstdisconnect = (mac->link_state < MAC80211_LINKED) &&
@@ -456,20 +429,20 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
456 if (rtlpriv->dm.one_entry_only) { 429 if (rtlpriv->dm.one_entry_only) {
457 offset = 12; 430 offset = 12;
458 if (dm_digtable->rssi_val_min - offset < dm_dig_min) 431 if (dm_digtable->rssi_val_min - offset < dm_dig_min)
459 dig_dynamic_min = dm_dig_min; 432 dig_min_0 = dm_dig_min;
460 else if (dm_digtable->rssi_val_min - offset > 433 else if (dm_digtable->rssi_val_min - offset >
461 dig_maxofmin) 434 dig_maxofmin)
462 dig_dynamic_min = dig_maxofmin; 435 dig_min_0 = dig_maxofmin;
463 else 436 else
464 dig_dynamic_min = 437 dig_min_0 =
465 dm_digtable->rssi_val_min - offset; 438 dm_digtable->rssi_val_min - offset;
466 } else { 439 } else {
467 dig_dynamic_min = dm_dig_min; 440 dig_min_0 = dm_dig_min;
468 } 441 }
469 442
470 } else { 443 } else {
471 dm_digtable->rx_gain_max = dm_dig_max; 444 dm_digtable->rx_gain_max = dm_dig_max;
472 dig_dynamic_min = dm_dig_min; 445 dig_min_0 = dm_dig_min;
473 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n"); 446 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
474 } 447 }
475 448
@@ -497,11 +470,11 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
497 } else { 470 } else {
498 if (dm_digtable->large_fa_hit < 3) { 471 if (dm_digtable->large_fa_hit < 3) {
499 if ((dm_digtable->forbidden_igi - 1) < 472 if ((dm_digtable->forbidden_igi - 1) <
500 dig_dynamic_min) { 473 dig_min_0) {
501 dm_digtable->forbidden_igi = 474 dm_digtable->forbidden_igi =
502 dig_dynamic_min; 475 dig_min_0;
503 dm_digtable->rx_gain_min = 476 dm_digtable->rx_gain_min =
504 dig_dynamic_min; 477 dig_min_0;
505 } else { 478 } else {
506 dm_digtable->forbidden_igi--; 479 dm_digtable->forbidden_igi--;
507 dm_digtable->rx_gain_min = 480 dm_digtable->rx_gain_min =
@@ -552,7 +525,7 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
552 rtl8723be_dm_write_dig(hw, current_igi); 525 rtl8723be_dm_write_dig(hw, current_igi);
553 dm_digtable->media_connect_0 = 526 dm_digtable->media_connect_0 =
554 ((mac->link_state >= MAC80211_LINKED) ? true : false); 527 ((mac->link_state >= MAC80211_LINKED) ? true : false);
555 dm_digtable->dig_dynamic_min = dig_dynamic_min; 528 dm_digtable->dig_min_0 = dig_min_0;
556} 529}
557 530
558static void rtl8723be_dm_false_alarm_counter_statistics( 531static void rtl8723be_dm_false_alarm_counter_statistics(
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h b/drivers/net/wireless/rtlwifi/rtl8723be/dm.h
index e4c0e8ae6f47..f752a2cad63d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/dm.h
@@ -180,28 +180,12 @@
180#define BW_AUTO_SWITCH_HIGH_LOW 25 180#define BW_AUTO_SWITCH_HIGH_LOW 25
181#define BW_AUTO_SWITCH_LOW_HIGH 30 181#define BW_AUTO_SWITCH_LOW_HIGH 30
182 182
183#define DM_DIG_THRESH_HIGH 40
184#define DM_DIG_THRESH_LOW 35
185
186#define DM_FALSEALARM_THRESH_LOW 400
187#define DM_FALSEALARM_THRESH_HIGH 1000
188
189#define DM_DIG_MAX 0x3e
190#define DM_DIG_MIN 0x1e
191
192#define DM_DIG_MAX_AP 0x32
193#define DM_DIG_MIN_AP 0x20
194
195#define DM_DIG_FA_UPPER 0x3e 183#define DM_DIG_FA_UPPER 0x3e
196#define DM_DIG_FA_LOWER 0x1e 184#define DM_DIG_FA_LOWER 0x1e
197#define DM_DIG_FA_TH0 0x200 185#define DM_DIG_FA_TH0 0x200
198#define DM_DIG_FA_TH1 0x300 186#define DM_DIG_FA_TH1 0x300
199#define DM_DIG_FA_TH2 0x400 187#define DM_DIG_FA_TH2 0x400
200 188
201#define DM_DIG_BACKOFF_MAX 12
202#define DM_DIG_BACKOFF_MIN -4
203#define DM_DIG_BACKOFF_DEFAULT 10
204
205#define RXPATHSELECTION_SS_TH_LOW 30 189#define RXPATHSELECTION_SS_TH_LOW 30
206#define RXPATHSELECTION_DIFF_TH 18 190#define RXPATHSELECTION_DIFF_TH 18
207 191
@@ -252,23 +236,6 @@ enum dm_sw_ant_switch_e {
252 ANS_ANTENNA_MAX = 3, 236 ANS_ANTENNA_MAX = 3,
253}; 237};
254 238
255enum dm_dig_ext_port_alg_e {
256 DIG_EXT_PORT_STAGE_0 = 0,
257 DIG_EXT_PORT_STAGE_1 = 1,
258 DIG_EXT_PORT_STAGE_2 = 2,
259 DIG_EXT_PORT_STAGE_3 = 3,
260 DIG_EXT_PORT_STAGE_MAX = 4,
261};
262
263enum dm_dig_connect_e {
264 DIG_STA_DISCONNECT = 0,
265 DIG_STA_CONNECT = 1,
266 DIG_STA_BEFORE_CONNECT = 2,
267 DIG_MULTISTA_DISCONNECT = 3,
268 DIG_MULTISTA_CONNECT = 4,
269 DIG_CONNECT_MAX
270};
271
272enum pwr_track_control_method { 239enum pwr_track_control_method {
273 BBSWING, 240 BBSWING,
274 TXAGC 241 TXAGC
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
index 20dcc25c506c..b7b73cbe346d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
@@ -874,31 +874,6 @@ void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
874 ROFDM0_RXDETECTOR3, rtlphy->framesync); 874 ROFDM0_RXDETECTOR3, rtlphy->framesync);
875} 875}
876 876
877void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
878{
879 struct rtl_priv *rtlpriv = rtl_priv(hw);
880 struct rtl_phy *rtlphy = &rtlpriv->phy;
881 u8 txpwr_level;
882 long txpwr_dbm;
883
884 txpwr_level = rtlphy->cur_cck_txpwridx;
885 txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B,
886 txpwr_level);
887 txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
888 if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) >
889 txpwr_dbm)
890 txpwr_dbm =
891 rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
892 txpwr_level);
893 txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
894 if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
895 txpwr_level) > txpwr_dbm)
896 txpwr_dbm =
897 rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
898 txpwr_level);
899 *powerlevel = txpwr_dbm;
900}
901
902static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path, 877static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path,
903 u8 rate) 878 u8 rate)
904{ 879{
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
index 6339738a0e33..9021d4745ab7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
@@ -114,8 +114,6 @@ bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw);
114bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw); 114bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw);
115bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw); 115bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw);
116void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); 116void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
117void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw,
118 long *powerlevel);
119void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, 117void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw,
120 u8 channel); 118 u8 channel);
121void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw, 119void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index 223eb42992bd..1017f02d7bf7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -387,12 +387,14 @@ module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
387module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); 387module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
388module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog, 388module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
389 bool, 0444); 389 bool, 0444);
390MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n"); 390MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
391MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n"); 391MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
392MODULE_PARM_DESC(fwlps, "using linked fw control power save (default 1 is open)\n"); 392MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
393MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
393MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); 394MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
394MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); 395MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
395MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); 396MODULE_PARM_DESC(disable_watchdog,
397 "Set to 1 to disable the watchdog (default 0)\n");
396 398
397static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); 399static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
398 400
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
index d6a1c70cb657..338ec9a9d09b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
@@ -47,164 +47,6 @@ static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
47 return skb->priority; 47 return skb->priority;
48} 48}
49 49
50/* mac80211's rate_idx is like this:
51 *
52 * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
53 *
54 * B/G rate:
55 * (rx_status->flag & RX_FLAG_HT) = 0,
56 * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
57 *
58 * N rate:
59 * (rx_status->flag & RX_FLAG_HT) = 1,
60 * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
61 *
62 * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
63 * A rate:
64 * (rx_status->flag & RX_FLAG_HT) = 0,
65 * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
66 *
67 * N rate:
68 * (rx_status->flag & RX_FLAG_HT) = 1,
69 * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
70 */
71static int _rtl8723be_rate_mapping(struct ieee80211_hw *hw,
72 bool isht, u8 desc_rate)
73{
74 int rate_idx;
75
76 if (!isht) {
77 if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
78 switch (desc_rate) {
79 case DESC92C_RATE1M:
80 rate_idx = 0;
81 break;
82 case DESC92C_RATE2M:
83 rate_idx = 1;
84 break;
85 case DESC92C_RATE5_5M:
86 rate_idx = 2;
87 break;
88 case DESC92C_RATE11M:
89 rate_idx = 3;
90 break;
91 case DESC92C_RATE6M:
92 rate_idx = 4;
93 break;
94 case DESC92C_RATE9M:
95 rate_idx = 5;
96 break;
97 case DESC92C_RATE12M:
98 rate_idx = 6;
99 break;
100 case DESC92C_RATE18M:
101 rate_idx = 7;
102 break;
103 case DESC92C_RATE24M:
104 rate_idx = 8;
105 break;
106 case DESC92C_RATE36M:
107 rate_idx = 9;
108 break;
109 case DESC92C_RATE48M:
110 rate_idx = 10;
111 break;
112 case DESC92C_RATE54M:
113 rate_idx = 11;
114 break;
115 default:
116 rate_idx = 0;
117 break;
118 }
119 } else {
120 switch (desc_rate) {
121 case DESC92C_RATE6M:
122 rate_idx = 0;
123 break;
124 case DESC92C_RATE9M:
125 rate_idx = 1;
126 break;
127 case DESC92C_RATE12M:
128 rate_idx = 2;
129 break;
130 case DESC92C_RATE18M:
131 rate_idx = 3;
132 break;
133 case DESC92C_RATE24M:
134 rate_idx = 4;
135 break;
136 case DESC92C_RATE36M:
137 rate_idx = 5;
138 break;
139 case DESC92C_RATE48M:
140 rate_idx = 6;
141 break;
142 case DESC92C_RATE54M:
143 rate_idx = 7;
144 break;
145 default:
146 rate_idx = 0;
147 break;
148 }
149 }
150 } else {
151 switch (desc_rate) {
152 case DESC92C_RATEMCS0:
153 rate_idx = 0;
154 break;
155 case DESC92C_RATEMCS1:
156 rate_idx = 1;
157 break;
158 case DESC92C_RATEMCS2:
159 rate_idx = 2;
160 break;
161 case DESC92C_RATEMCS3:
162 rate_idx = 3;
163 break;
164 case DESC92C_RATEMCS4:
165 rate_idx = 4;
166 break;
167 case DESC92C_RATEMCS5:
168 rate_idx = 5;
169 break;
170 case DESC92C_RATEMCS6:
171 rate_idx = 6;
172 break;
173 case DESC92C_RATEMCS7:
174 rate_idx = 7;
175 break;
176 case DESC92C_RATEMCS8:
177 rate_idx = 8;
178 break;
179 case DESC92C_RATEMCS9:
180 rate_idx = 9;
181 break;
182 case DESC92C_RATEMCS10:
183 rate_idx = 10;
184 break;
185 case DESC92C_RATEMCS11:
186 rate_idx = 11;
187 break;
188 case DESC92C_RATEMCS12:
189 rate_idx = 12;
190 break;
191 case DESC92C_RATEMCS13:
192 rate_idx = 13;
193 break;
194 case DESC92C_RATEMCS14:
195 rate_idx = 14;
196 break;
197 case DESC92C_RATEMCS15:
198 rate_idx = 15;
199 break;
200 default:
201 rate_idx = 0;
202 break;
203 }
204 }
205 return rate_idx;
206}
207
208static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw, 50static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw,
209 struct rtl_stats *pstatus, u8 *pdesc, 51 struct rtl_stats *pstatus, u8 *pdesc,
210 struct rx_fwinfo_8723be *p_drvinfo, 52 struct rx_fwinfo_8723be *p_drvinfo,
@@ -558,8 +400,8 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
558 * supported rates or MCS index if HT rates 400 * supported rates or MCS index if HT rates
559 * are use (RX_FLAG_HT) 401 * are use (RX_FLAG_HT)
560 */ 402 */
561 rx_status->rate_idx = _rtl8723be_rate_mapping(hw, status->is_ht, 403 rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
562 status->rate); 404 false, status->rate);
563 405
564 rx_status->mactime = status->timestamp_low; 406 rx_status->mactime = status->timestamp_low;
565 if (phystatus) { 407 if (phystatus) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/rtlwifi/rtl8821ae/def.h
index a730985ae81d..ee7c208bd070 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/def.h
@@ -373,60 +373,6 @@ enum rtl_desc_qsel {
373 QSLT_CMD = 0x13, 373 QSLT_CMD = 0x13,
374}; 374};
375 375
376enum rtl_desc8821ae_rate {
377 DESC_RATE1M = 0x00,
378 DESC_RATE2M = 0x01,
379 DESC_RATE5_5M = 0x02,
380 DESC_RATE11M = 0x03,
381
382 DESC_RATE6M = 0x04,
383 DESC_RATE9M = 0x05,
384 DESC_RATE12M = 0x06,
385 DESC_RATE18M = 0x07,
386 DESC_RATE24M = 0x08,
387 DESC_RATE36M = 0x09,
388 DESC_RATE48M = 0x0a,
389 DESC_RATE54M = 0x0b,
390
391 DESC_RATEMCS0 = 0x0c,
392 DESC_RATEMCS1 = 0x0d,
393 DESC_RATEMCS2 = 0x0e,
394 DESC_RATEMCS3 = 0x0f,
395 DESC_RATEMCS4 = 0x10,
396 DESC_RATEMCS5 = 0x11,
397 DESC_RATEMCS6 = 0x12,
398 DESC_RATEMCS7 = 0x13,
399 DESC_RATEMCS8 = 0x14,
400 DESC_RATEMCS9 = 0x15,
401 DESC_RATEMCS10 = 0x16,
402 DESC_RATEMCS11 = 0x17,
403 DESC_RATEMCS12 = 0x18,
404 DESC_RATEMCS13 = 0x19,
405 DESC_RATEMCS14 = 0x1a,
406 DESC_RATEMCS15 = 0x1b,
407
408 DESC_RATEVHT1SS_MCS0 = 0x2c,
409 DESC_RATEVHT1SS_MCS1 = 0x2d,
410 DESC_RATEVHT1SS_MCS2 = 0x2e,
411 DESC_RATEVHT1SS_MCS3 = 0x2f,
412 DESC_RATEVHT1SS_MCS4 = 0x30,
413 DESC_RATEVHT1SS_MCS5 = 0x31,
414 DESC_RATEVHT1SS_MCS6 = 0x32,
415 DESC_RATEVHT1SS_MCS7 = 0x33,
416 DESC_RATEVHT1SS_MCS8 = 0x34,
417 DESC_RATEVHT1SS_MCS9 = 0x35,
418 DESC_RATEVHT2SS_MCS0 = 0x36,
419 DESC_RATEVHT2SS_MCS1 = 0x37,
420 DESC_RATEVHT2SS_MCS2 = 0x38,
421 DESC_RATEVHT2SS_MCS3 = 0x39,
422 DESC_RATEVHT2SS_MCS4 = 0x3a,
423 DESC_RATEVHT2SS_MCS5 = 0x3b,
424 DESC_RATEVHT2SS_MCS6 = 0x3c,
425 DESC_RATEVHT2SS_MCS7 = 0x3d,
426 DESC_RATEVHT2SS_MCS8 = 0x3e,
427 DESC_RATEVHT2SS_MCS9 = 0x3f,
428};
429
430enum rx_packet_type { 376enum rx_packet_type {
431 NORMAL_RX, 377 NORMAL_RX,
432 TX_REPORT1, 378 TX_REPORT1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
index ba30b0d250fd..0b2082dc48f1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
@@ -26,6 +26,7 @@
26#include "../wifi.h" 26#include "../wifi.h"
27#include "../base.h" 27#include "../base.h"
28#include "../pci.h" 28#include "../pci.h"
29#include "../core.h"
29#include "reg.h" 30#include "reg.h"
30#include "def.h" 31#include "def.h"
31#include "phy.h" 32#include "phy.h"
@@ -519,34 +520,6 @@ void rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(
519 } 520 }
520} 521}
521 522
522static void rtl8821ae_dm_diginit(struct ieee80211_hw *hw)
523{
524 struct rtl_priv *rtlpriv = rtl_priv(hw);
525 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
526
527 dm_digtable->cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
528 dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
529 dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
530 dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
531 dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
532 dm_digtable->rx_gain_max = DM_DIG_MAX;
533 dm_digtable->rx_gain_min = DM_DIG_MIN;
534 dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
535 dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
536 dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
537 dm_digtable->pre_cck_cca_thres = 0xff;
538 dm_digtable->cur_cck_cca_thres = 0x83;
539 dm_digtable->forbidden_igi = DM_DIG_MIN;
540 dm_digtable->large_fa_hit = 0;
541 dm_digtable->recover_cnt = 0;
542 dm_digtable->dig_dynamic_min = DM_DIG_MIN;
543 dm_digtable->dig_dynamic_min_1 = DM_DIG_MIN;
544 dm_digtable->media_connect_0 = false;
545 dm_digtable->media_connect_1 = false;
546 rtlpriv->dm.dm_initialgain_enable = true;
547 dm_digtable->bt30_cur_igi = 0x32;
548}
549
550void rtl8821ae_dm_init_edca_turbo(struct ieee80211_hw *hw) 523void rtl8821ae_dm_init_edca_turbo(struct ieee80211_hw *hw)
551{ 524{
552 struct rtl_priv *rtlpriv = rtl_priv(hw); 525 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -606,6 +579,7 @@ void rtl8821ae_dm_init(struct ieee80211_hw *hw)
606{ 579{
607 struct rtl_priv *rtlpriv = rtl_priv(hw); 580 struct rtl_priv *rtlpriv = rtl_priv(hw);
608 struct rtl_phy *rtlphy = &rtlpriv->phy; 581 struct rtl_phy *rtlphy = &rtlpriv->phy;
582 u32 cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f);
609 583
610 spin_lock(&rtlpriv->locks.iqk_lock); 584 spin_lock(&rtlpriv->locks.iqk_lock);
611 rtlphy->lck_inprogress = false; 585 rtlphy->lck_inprogress = false;
@@ -613,7 +587,7 @@ void rtl8821ae_dm_init(struct ieee80211_hw *hw)
613 587
614 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; 588 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
615 rtl8821ae_dm_common_info_self_init(hw); 589 rtl8821ae_dm_common_info_self_init(hw);
616 rtl8821ae_dm_diginit(hw); 590 rtl_dm_diginit(hw, cur_igvalue);
617 rtl8821ae_dm_init_rate_adaptive_mask(hw); 591 rtl8821ae_dm_init_rate_adaptive_mask(hw);
618 rtl8821ae_dm_init_edca_turbo(hw); 592 rtl8821ae_dm_init_edca_turbo(hw);
619 rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(hw); 593 rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(hw);
@@ -822,7 +796,7 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
822 struct dig_t *dm_digtable = &rtlpriv->dm_digtable; 796 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
823 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 797 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
824 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 798 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
825 u8 dig_dynamic_min; 799 u8 dig_min_0;
826 u8 dig_max_of_min; 800 u8 dig_max_of_min;
827 bool first_connect, first_disconnect; 801 bool first_connect, first_disconnect;
828 u8 dm_dig_max, dm_dig_min, offset; 802 u8 dm_dig_max, dm_dig_min, offset;
@@ -837,7 +811,7 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
837 } 811 }
838 812
839 /*add by Neil Chen to avoid PSD is processing*/ 813 /*add by Neil Chen to avoid PSD is processing*/
840 dig_dynamic_min = dm_digtable->dig_dynamic_min; 814 dig_min_0 = dm_digtable->dig_min_0;
841 first_connect = (mac->link_state >= MAC80211_LINKED) && 815 first_connect = (mac->link_state >= MAC80211_LINKED) &&
842 (!dm_digtable->media_connect_0); 816 (!dm_digtable->media_connect_0);
843 first_disconnect = (mac->link_state < MAC80211_LINKED) && 817 first_disconnect = (mac->link_state < MAC80211_LINKED) &&
@@ -876,23 +850,23 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
876 offset = 0; 850 offset = 0;
877 851
878 if (dm_digtable->rssi_val_min - offset < dm_dig_min) 852 if (dm_digtable->rssi_val_min - offset < dm_dig_min)
879 dig_dynamic_min = dm_dig_min; 853 dig_min_0 = dm_dig_min;
880 else if (dm_digtable->rssi_val_min - 854 else if (dm_digtable->rssi_val_min -
881 offset > dig_max_of_min) 855 offset > dig_max_of_min)
882 dig_dynamic_min = dig_max_of_min; 856 dig_min_0 = dig_max_of_min;
883 else 857 else
884 dig_dynamic_min = 858 dig_min_0 =
885 dm_digtable->rssi_val_min - offset; 859 dm_digtable->rssi_val_min - offset;
886 860
887 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 861 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
888 "bOneEntryOnly=TRUE, dig_dynamic_min=0x%x\n", 862 "bOneEntryOnly=TRUE, dig_min_0=0x%x\n",
889 dig_dynamic_min); 863 dig_min_0);
890 } else { 864 } else {
891 dig_dynamic_min = dm_dig_min; 865 dig_min_0 = dm_dig_min;
892 } 866 }
893 } else { 867 } else {
894 dm_digtable->rx_gain_max = dm_dig_max; 868 dm_digtable->rx_gain_max = dm_dig_max;
895 dig_dynamic_min = dm_dig_min; 869 dig_min_0 = dm_dig_min;
896 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 870 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
897 "No Link\n"); 871 "No Link\n");
898 } 872 }
@@ -925,11 +899,11 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
925 } else { 899 } else {
926 if (dm_digtable->large_fa_hit < 3) { 900 if (dm_digtable->large_fa_hit < 3) {
927 if ((dm_digtable->forbidden_igi - 1) < 901 if ((dm_digtable->forbidden_igi - 1) <
928 dig_dynamic_min) { 902 dig_min_0) {
929 dm_digtable->forbidden_igi = 903 dm_digtable->forbidden_igi =
930 dig_dynamic_min; 904 dig_min_0;
931 dm_digtable->rx_gain_min = 905 dm_digtable->rx_gain_min =
932 dig_dynamic_min; 906 dig_min_0;
933 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 907 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
934 "Normal Case: At Lower Bound\n"); 908 "Normal Case: At Lower Bound\n");
935 } else { 909 } else {
@@ -1024,7 +998,7 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
1024 rtl8821ae_dm_write_dig(hw, current_igi); 998 rtl8821ae_dm_write_dig(hw, current_igi);
1025 dm_digtable->media_connect_0 = 999 dm_digtable->media_connect_0 =
1026 ((mac->link_state >= MAC80211_LINKED) ? true : false); 1000 ((mac->link_state >= MAC80211_LINKED) ? true : false);
1027 dm_digtable->dig_dynamic_min = dig_dynamic_min; 1001 dm_digtable->dig_min_0 = dig_min_0;
1028} 1002}
1029 1003
1030static void rtl8821ae_dm_common_info_self_update(struct ieee80211_hw *hw) 1004static void rtl8821ae_dm_common_info_self_update(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h
index 9dd40dd316c1..625a6bbb21fc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h
@@ -187,28 +187,12 @@
187#define BW_AUTO_SWITCH_HIGH_LOW 25 187#define BW_AUTO_SWITCH_HIGH_LOW 25
188#define BW_AUTO_SWITCH_LOW_HIGH 30 188#define BW_AUTO_SWITCH_LOW_HIGH 30
189 189
190#define DM_DIG_THRESH_HIGH 40
191#define DM_DIG_THRESH_LOW 35
192
193#define DM_FALSEALARM_THRESH_LOW 400
194#define DM_FALSEALARM_THRESH_HIGH 1000
195
196#define DM_DIG_MAX 0x3e
197#define DM_DIG_MIN 0x1e
198
199#define DM_DIG_MAX_AP 0x32
200#define DM_DIG_MIN_AP 0x20
201
202#define DM_DIG_FA_UPPER 0x3e 190#define DM_DIG_FA_UPPER 0x3e
203#define DM_DIG_FA_LOWER 0x1e 191#define DM_DIG_FA_LOWER 0x1e
204#define DM_DIG_FA_TH0 200 192#define DM_DIG_FA_TH0 200
205#define DM_DIG_FA_TH1 0x300 193#define DM_DIG_FA_TH1 0x300
206#define DM_DIG_FA_TH2 0x400 194#define DM_DIG_FA_TH2 0x400
207 195
208#define DM_DIG_BACKOFF_MAX 12
209#define DM_DIG_BACKOFF_MIN -4
210#define DM_DIG_BACKOFF_DEFAULT 10
211
212#define RXPATHSELECTION_SS_TH_LOW 30 196#define RXPATHSELECTION_SS_TH_LOW 30
213#define RXPATHSELECTION_DIFF_TH 18 197#define RXPATHSELECTION_DIFF_TH 18
214 198
@@ -262,14 +246,6 @@ enum tag_dynamic_init_gain_operation_type_definition {
262 DIG_OP_TYPE_MAX 246 DIG_OP_TYPE_MAX
263}; 247};
264 248
265enum tag_cck_packet_detection_threshold_type_definition {
266 CCK_PD_STAGE_LOWRSSI = 0,
267 CCK_PD_STAGE_HIGHRSSI = 1,
268 CCK_FA_STAGE_LOW = 2,
269 CCK_FA_STAGE_HIGH = 3,
270 CCK_PD_STAGE_MAX = 4,
271};
272
273enum dm_1r_cca_e { 249enum dm_1r_cca_e {
274 CCA_1R = 0, 250 CCA_1R = 0,
275 CCA_2R = 1, 251 CCA_2R = 1,
@@ -288,23 +264,6 @@ enum dm_sw_ant_switch_e {
288 ANS_ANTENNA_MAX = 3, 264 ANS_ANTENNA_MAX = 3,
289}; 265};
290 266
291enum dm_dig_ext_port_alg_e {
292 DIG_EXT_PORT_STAGE_0 = 0,
293 DIG_EXT_PORT_STAGE_1 = 1,
294 DIG_EXT_PORT_STAGE_2 = 2,
295 DIG_EXT_PORT_STAGE_3 = 3,
296 DIG_EXT_PORT_STAGE_MAX = 4,
297};
298
299enum dm_dig_connect_e {
300 DIG_STA_DISCONNECT = 0,
301 DIG_STA_CONNECT = 1,
302 DIG_STA_BEFORE_CONNECT = 2,
303 DIG_MULTISTA_DISCONNECT = 3,
304 DIG_MULTISTA_CONNECT = 4,
305 DIG_CONNECT_MAX
306};
307
308enum pwr_track_control_method { 267enum pwr_track_control_method {
309 BBSWING, 268 BBSWING,
310 TXAGC, 269 TXAGC,
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h
index bf0b0ce9519c..36b3e91d996e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h
@@ -93,9 +93,9 @@
93 93
94#define RTL8812_TRANS_CARDEMU_TO_SUS \ 94#define RTL8812_TRANS_CARDEMU_TO_SUS \
95 {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\ 95 {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,\
96 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xcc}, \ 96 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xc0}, \
97 {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\ 97 {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,\
98 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xEC}, \ 98 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xF0, 0xE0}, \
99 {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ 99 {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\
100 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x07 \ 100 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x07 \
101 /* gpio11 input mode, gpio10~8 output mode */}, \ 101 /* gpio11 input mode, gpio10~8 output mode */}, \
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
index fc92dd6a0d07..a4988121e1ab 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
@@ -85,52 +85,6 @@ static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
85 rtlpci->const_support_pciaspm = 1; 85 rtlpci->const_support_pciaspm = 1;
86} 86}
87 87
88static void load_wowlan_fw(struct rtl_priv *rtlpriv)
89{
90 /* callback routine to load wowlan firmware after main fw has
91 * been loaded
92 */
93 const struct firmware *wowlan_firmware;
94 char *fw_name = NULL;
95 int err;
96
97 /* for wowlan firmware buf */
98 rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000);
99 if (!rtlpriv->rtlhal.wowlan_firmware) {
100 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
101 "Can't alloc buffer for wowlan fw.\n");
102 return;
103 }
104
105 if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8821AE)
106 fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
107 else
108 fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
109 err = request_firmware(&wowlan_firmware, fw_name, rtlpriv->io.dev);
110 if (err) {
111 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
112 "Failed to request wowlan firmware!\n");
113 goto error;
114 }
115
116 if (wowlan_firmware->size > 0x8000) {
117 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
118 "Wowlan Firmware is too big!\n");
119 goto error;
120 }
121
122 memcpy(rtlpriv->rtlhal.wowlan_firmware, wowlan_firmware->data,
123 wowlan_firmware->size);
124 rtlpriv->rtlhal.wowlan_fwsize = wowlan_firmware->size;
125 release_firmware(wowlan_firmware);
126
127 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "WOWLAN FirmwareDownload OK\n");
128 return;
129error:
130 release_firmware(wowlan_firmware);
131 vfree(rtlpriv->rtlhal.wowlan_firmware);
132}
133
134/*InitializeVariables8812E*/ 88/*InitializeVariables8812E*/
135int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) 89int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
136{ 90{
@@ -231,7 +185,6 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
231 else if (rtlpriv->psc.reg_fwctrl_lps == 3) 185 else if (rtlpriv->psc.reg_fwctrl_lps == 3)
232 rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; 186 rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
233 187
234 rtlpriv->rtl_fw_second_cb = load_wowlan_fw;
235 /* for firmware buf */ 188 /* for firmware buf */
236 rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); 189 rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
237 if (!rtlpriv->rtlhal.pfirmware) { 190 if (!rtlpriv->rtlhal.pfirmware) {
@@ -239,20 +192,41 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
239 "Can't alloc buffer for fw.\n"); 192 "Can't alloc buffer for fw.\n");
240 return 1; 193 return 1;
241 } 194 }
195 rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000);
196 if (!rtlpriv->rtlhal.wowlan_firmware) {
197 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
198 "Can't alloc buffer for wowlan fw.\n");
199 return 1;
200 }
242 201
243 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) 202 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
244 rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin"; 203 rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin";
245 else 204 rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
205 } else {
246 rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin"; 206 rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin";
207 rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
208 }
247 209
248 rtlpriv->max_fw_size = 0x8000; 210 rtlpriv->max_fw_size = 0x8000;
211 /*load normal firmware*/
249 pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); 212 pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
250 err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, 213 err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
251 rtlpriv->io.dev, GFP_KERNEL, hw, 214 rtlpriv->io.dev, GFP_KERNEL, hw,
252 rtl_fw_cb); 215 rtl_fw_cb);
253 if (err) { 216 if (err) {
254 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 217 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
255 "Failed to request firmware!\n"); 218 "Failed to request normal firmware!\n");
219 return 1;
220 }
221 /*load wowlan firmware*/
222 pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name);
223 err = request_firmware_nowait(THIS_MODULE, 1,
224 rtlpriv->cfg->wowlan_fw_name,
225 rtlpriv->io.dev, GFP_KERNEL, hw,
226 rtl_wowlan_fw_cb);
227 if (err) {
228 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
229 "Failed to request wowlan firmware!\n");
256 return 1; 230 return 1;
257 } 231 }
258 return 0; 232 return 0;
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
index 383b86b05cba..72af4b9ee32b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
@@ -48,232 +48,6 @@ static u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
48 return skb->priority; 48 return skb->priority;
49} 49}
50 50
51/* mac80211's rate_idx is like this:
52 *
53 * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
54 *
55 * B/G rate:
56 * (rx_status->flag & RX_FLAG_HT) = 0,
57 * DESC_RATE1M-->DESC_RATE54M ==> idx is 0-->11,
58 *
59 * N rate:
60 * (rx_status->flag & RX_FLAG_HT) = 1,
61 * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
62 *
63 * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
64 * A rate:
65 * (rx_status->flag & RX_FLAG_HT) = 0,
66 * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
67 *
68 * N rate:
69 * (rx_status->flag & RX_FLAG_HT) = 1,
70 * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
71 */
72static int _rtl8821ae_rate_mapping(struct ieee80211_hw *hw,
73 bool isht, bool isvht, u8 desc_rate)
74{
75 int rate_idx;
76
77 if (!isht) {
78 if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
79 switch (desc_rate) {
80 case DESC_RATE1M:
81 rate_idx = 0;
82 break;
83 case DESC_RATE2M:
84 rate_idx = 1;
85 break;
86 case DESC_RATE5_5M:
87 rate_idx = 2;
88 break;
89 case DESC_RATE11M:
90 rate_idx = 3;
91 break;
92 case DESC_RATE6M:
93 rate_idx = 4;
94 break;
95 case DESC_RATE9M:
96 rate_idx = 5;
97 break;
98 case DESC_RATE12M:
99 rate_idx = 6;
100 break;
101 case DESC_RATE18M:
102 rate_idx = 7;
103 break;
104 case DESC_RATE24M:
105 rate_idx = 8;
106 break;
107 case DESC_RATE36M:
108 rate_idx = 9;
109 break;
110 case DESC_RATE48M:
111 rate_idx = 10;
112 break;
113 case DESC_RATE54M:
114 rate_idx = 11;
115 break;
116 default:
117 rate_idx = 0;
118 break;
119 }
120 } else {
121 switch (desc_rate) {
122 case DESC_RATE6M:
123 rate_idx = 0;
124 break;
125 case DESC_RATE9M:
126 rate_idx = 1;
127 break;
128 case DESC_RATE12M:
129 rate_idx = 2;
130 break;
131 case DESC_RATE18M:
132 rate_idx = 3;
133 break;
134 case DESC_RATE24M:
135 rate_idx = 4;
136 break;
137 case DESC_RATE36M:
138 rate_idx = 5;
139 break;
140 case DESC_RATE48M:
141 rate_idx = 6;
142 break;
143 case DESC_RATE54M:
144 rate_idx = 7;
145 break;
146 default:
147 rate_idx = 0;
148 break;
149 }
150 }
151 } else {
152 switch (desc_rate) {
153 case DESC_RATEMCS0:
154 rate_idx = 0;
155 break;
156 case DESC_RATEMCS1:
157 rate_idx = 1;
158 break;
159 case DESC_RATEMCS2:
160 rate_idx = 2;
161 break;
162 case DESC_RATEMCS3:
163 rate_idx = 3;
164 break;
165 case DESC_RATEMCS4:
166 rate_idx = 4;
167 break;
168 case DESC_RATEMCS5:
169 rate_idx = 5;
170 break;
171 case DESC_RATEMCS6:
172 rate_idx = 6;
173 break;
174 case DESC_RATEMCS7:
175 rate_idx = 7;
176 break;
177 case DESC_RATEMCS8:
178 rate_idx = 8;
179 break;
180 case DESC_RATEMCS9:
181 rate_idx = 9;
182 break;
183 case DESC_RATEMCS10:
184 rate_idx = 10;
185 break;
186 case DESC_RATEMCS11:
187 rate_idx = 11;
188 break;
189 case DESC_RATEMCS12:
190 rate_idx = 12;
191 break;
192 case DESC_RATEMCS13:
193 rate_idx = 13;
194 break;
195 case DESC_RATEMCS14:
196 rate_idx = 14;
197 break;
198 case DESC_RATEMCS15:
199 rate_idx = 15;
200 break;
201 default:
202 rate_idx = 0;
203 break;
204 }
205 }
206
207 if (isvht) {
208 switch (desc_rate) {
209 case DESC_RATEVHT1SS_MCS0:
210 rate_idx = 0;
211 break;
212 case DESC_RATEVHT1SS_MCS1:
213 rate_idx = 1;
214 break;
215 case DESC_RATEVHT1SS_MCS2:
216 rate_idx = 2;
217 break;
218 case DESC_RATEVHT1SS_MCS3:
219 rate_idx = 3;
220 break;
221 case DESC_RATEVHT1SS_MCS4:
222 rate_idx = 4;
223 break;
224 case DESC_RATEVHT1SS_MCS5:
225 rate_idx = 5;
226 break;
227 case DESC_RATEVHT1SS_MCS6:
228 rate_idx = 6;
229 break;
230 case DESC_RATEVHT1SS_MCS7:
231 rate_idx = 7;
232 break;
233 case DESC_RATEVHT1SS_MCS8:
234 rate_idx = 8;
235 break;
236 case DESC_RATEVHT1SS_MCS9:
237 rate_idx = 9;
238 break;
239 case DESC_RATEVHT2SS_MCS0:
240 rate_idx = 0;
241 break;
242 case DESC_RATEVHT2SS_MCS1:
243 rate_idx = 1;
244 break;
245 case DESC_RATEVHT2SS_MCS2:
246 rate_idx = 2;
247 break;
248 case DESC_RATEVHT2SS_MCS3:
249 rate_idx = 3;
250 break;
251 case DESC_RATEVHT2SS_MCS4:
252 rate_idx = 4;
253 break;
254 case DESC_RATEVHT2SS_MCS5:
255 rate_idx = 5;
256 break;
257 case DESC_RATEVHT2SS_MCS6:
258 rate_idx = 6;
259 break;
260 case DESC_RATEVHT2SS_MCS7:
261 rate_idx = 7;
262 break;
263 case DESC_RATEVHT2SS_MCS8:
264 rate_idx = 8;
265 break;
266 case DESC_RATEVHT2SS_MCS9:
267 rate_idx = 9;
268 break;
269 default:
270 rate_idx = 0;
271 break;
272 }
273 }
274 return rate_idx;
275}
276
277static u16 odm_cfo(char value) 51static u16 odm_cfo(char value)
278{ 52{
279 int ret_val; 53 int ret_val;
@@ -766,9 +540,9 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
766 * supported rates or MCS index if HT rates 540 * supported rates or MCS index if HT rates
767 * are use (RX_FLAG_HT) 541 * are use (RX_FLAG_HT)
768 */ 542 */
769 rx_status->rate_idx = 543 rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
770 _rtl8821ae_rate_mapping(hw, status->is_ht, 544 status->is_vht,
771 status->is_vht, status->rate); 545 status->rate);
772 546
773 rx_status->mactime = status->timestamp_low; 547 rx_status->mactime = status->timestamp_low;
774 if (phystatus) { 548 if (phystatus) {
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 6866dcf24340..51572912c53d 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -331,10 +331,10 @@ enum hardware_type {
331(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal)) 331(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
332 332
333#define RX_HAL_IS_CCK_RATE(rxmcs) \ 333#define RX_HAL_IS_CCK_RATE(rxmcs) \
334 ((rxmcs) == DESC92_RATE1M || \ 334 ((rxmcs) == DESC_RATE1M || \
335 (rxmcs) == DESC92_RATE2M || \ 335 (rxmcs) == DESC_RATE2M || \
336 (rxmcs) == DESC92_RATE5_5M || \ 336 (rxmcs) == DESC_RATE5_5M || \
337 (rxmcs) == DESC92_RATE11M) 337 (rxmcs) == DESC_RATE11M)
338 338
339enum scan_operation_backup_opt { 339enum scan_operation_backup_opt {
340 SCAN_OPT_BACKUP = 0, 340 SCAN_OPT_BACKUP = 0,
@@ -579,38 +579,59 @@ enum rtl_hal_state {
579}; 579};
580 580
581enum rtl_desc92_rate { 581enum rtl_desc92_rate {
582 DESC92_RATE1M = 0x00, 582 DESC_RATE1M = 0x00,
583 DESC92_RATE2M = 0x01, 583 DESC_RATE2M = 0x01,
584 DESC92_RATE5_5M = 0x02, 584 DESC_RATE5_5M = 0x02,
585 DESC92_RATE11M = 0x03, 585 DESC_RATE11M = 0x03,
586 586
587 DESC92_RATE6M = 0x04, 587 DESC_RATE6M = 0x04,
588 DESC92_RATE9M = 0x05, 588 DESC_RATE9M = 0x05,
589 DESC92_RATE12M = 0x06, 589 DESC_RATE12M = 0x06,
590 DESC92_RATE18M = 0x07, 590 DESC_RATE18M = 0x07,
591 DESC92_RATE24M = 0x08, 591 DESC_RATE24M = 0x08,
592 DESC92_RATE36M = 0x09, 592 DESC_RATE36M = 0x09,
593 DESC92_RATE48M = 0x0a, 593 DESC_RATE48M = 0x0a,
594 DESC92_RATE54M = 0x0b, 594 DESC_RATE54M = 0x0b,
595 595
596 DESC92_RATEMCS0 = 0x0c, 596 DESC_RATEMCS0 = 0x0c,
597 DESC92_RATEMCS1 = 0x0d, 597 DESC_RATEMCS1 = 0x0d,
598 DESC92_RATEMCS2 = 0x0e, 598 DESC_RATEMCS2 = 0x0e,
599 DESC92_RATEMCS3 = 0x0f, 599 DESC_RATEMCS3 = 0x0f,
600 DESC92_RATEMCS4 = 0x10, 600 DESC_RATEMCS4 = 0x10,
601 DESC92_RATEMCS5 = 0x11, 601 DESC_RATEMCS5 = 0x11,
602 DESC92_RATEMCS6 = 0x12, 602 DESC_RATEMCS6 = 0x12,
603 DESC92_RATEMCS7 = 0x13, 603 DESC_RATEMCS7 = 0x13,
604 DESC92_RATEMCS8 = 0x14, 604 DESC_RATEMCS8 = 0x14,
605 DESC92_RATEMCS9 = 0x15, 605 DESC_RATEMCS9 = 0x15,
606 DESC92_RATEMCS10 = 0x16, 606 DESC_RATEMCS10 = 0x16,
607 DESC92_RATEMCS11 = 0x17, 607 DESC_RATEMCS11 = 0x17,
608 DESC92_RATEMCS12 = 0x18, 608 DESC_RATEMCS12 = 0x18,
609 DESC92_RATEMCS13 = 0x19, 609 DESC_RATEMCS13 = 0x19,
610 DESC92_RATEMCS14 = 0x1a, 610 DESC_RATEMCS14 = 0x1a,
611 DESC92_RATEMCS15 = 0x1b, 611 DESC_RATEMCS15 = 0x1b,
612 DESC92_RATEMCS15_SG = 0x1c, 612 DESC_RATEMCS15_SG = 0x1c,
613 DESC92_RATEMCS32 = 0x20, 613 DESC_RATEMCS32 = 0x20,
614
615 DESC_RATEVHT1SS_MCS0 = 0x2c,
616 DESC_RATEVHT1SS_MCS1 = 0x2d,
617 DESC_RATEVHT1SS_MCS2 = 0x2e,
618 DESC_RATEVHT1SS_MCS3 = 0x2f,
619 DESC_RATEVHT1SS_MCS4 = 0x30,
620 DESC_RATEVHT1SS_MCS5 = 0x31,
621 DESC_RATEVHT1SS_MCS6 = 0x32,
622 DESC_RATEVHT1SS_MCS7 = 0x33,
623 DESC_RATEVHT1SS_MCS8 = 0x34,
624 DESC_RATEVHT1SS_MCS9 = 0x35,
625 DESC_RATEVHT2SS_MCS0 = 0x36,
626 DESC_RATEVHT2SS_MCS1 = 0x37,
627 DESC_RATEVHT2SS_MCS2 = 0x38,
628 DESC_RATEVHT2SS_MCS3 = 0x39,
629 DESC_RATEVHT2SS_MCS4 = 0x3a,
630 DESC_RATEVHT2SS_MCS5 = 0x3b,
631 DESC_RATEVHT2SS_MCS6 = 0x3c,
632 DESC_RATEVHT2SS_MCS7 = 0x3d,
633 DESC_RATEVHT2SS_MCS8 = 0x3e,
634 DESC_RATEVHT2SS_MCS9 = 0x3f,
614}; 635};
615 636
616enum rtl_var_map { 637enum rtl_var_map {
@@ -2161,6 +2182,7 @@ struct rtl_hal_ops {
2161 void (*add_wowlan_pattern)(struct ieee80211_hw *hw, 2182 void (*add_wowlan_pattern)(struct ieee80211_hw *hw,
2162 struct rtl_wow_pattern *rtl_pattern, 2183 struct rtl_wow_pattern *rtl_pattern,
2163 u8 index); 2184 u8 index);
2185 u16 (*get_available_desc)(struct ieee80211_hw *hw, u8 q_idx);
2164}; 2186};
2165 2187
2166struct rtl_intf_ops { 2188struct rtl_intf_ops {
@@ -2242,6 +2264,7 @@ struct rtl_hal_cfg {
2242 char *name; 2264 char *name;
2243 char *fw_name; 2265 char *fw_name;
2244 char *alt_fw_name; 2266 char *alt_fw_name;
2267 char *wowlan_fw_name;
2245 struct rtl_hal_ops *ops; 2268 struct rtl_hal_ops *ops;
2246 struct rtl_mod_params *mod_params; 2269 struct rtl_mod_params *mod_params;
2247 struct rtl_hal_usbint_cfg *usb_interface_cfg; 2270 struct rtl_hal_usbint_cfg *usb_interface_cfg;
@@ -2390,8 +2413,6 @@ struct dig_t {
2390 u8 pre_ccastate; 2413 u8 pre_ccastate;
2391 u8 cur_ccasate; 2414 u8 cur_ccasate;
2392 u8 large_fa_hit; 2415 u8 large_fa_hit;
2393 u8 dig_dynamic_min;
2394 u8 dig_dynamic_min_1;
2395 u8 forbidden_igi; 2416 u8 forbidden_igi;
2396 u8 dig_state; 2417 u8 dig_state;
2397 u8 dig_highpwrstate; 2418 u8 dig_highpwrstate;
@@ -2518,8 +2539,6 @@ struct proxim {
2518 2539
2519struct rtl_priv { 2540struct rtl_priv {
2520 struct ieee80211_hw *hw; 2541 struct ieee80211_hw *hw;
2521 /* Used to load a second firmware */
2522 void (*rtl_fw_second_cb)(struct rtl_priv *rtlpriv);
2523 struct completion firmware_loading_complete; 2542 struct completion firmware_loading_complete;
2524 struct list_head list; 2543 struct list_head list;
2525 struct rtl_priv *buddy_priv; 2544 struct rtl_priv *buddy_priv;
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 0b30a7b4d663..d4ba009ac9aa 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -500,6 +500,7 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
500 int ret = 0; 500 int ret = 0;
501 501
502 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | 502 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
503 IEEE80211_VIF_SUPPORTS_UAPSD |
503 IEEE80211_VIF_SUPPORTS_CQM_RSSI; 504 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
504 505
505 wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", 506 wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
@@ -1480,9 +1481,7 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
1480 /* unit us */ 1481 /* unit us */
1481 /* FIXME: find a proper value */ 1482 /* FIXME: find a proper value */
1482 1483
1483 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 1484 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SUPPORTS_PS;
1484 IEEE80211_HW_SUPPORTS_PS |
1485 IEEE80211_HW_SUPPORTS_UAPSD;
1486 1485
1487 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 1486 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1488 BIT(NL80211_IFTYPE_ADHOC); 1487 BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index d6d0d6d9c7a8..144d1f8ba473 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -250,6 +250,7 @@ static struct wlcore_conf wl12xx_conf = {
250 .keep_alive_interval = 55000, 250 .keep_alive_interval = 55000,
251 .max_listen_interval = 20, 251 .max_listen_interval = 20,
252 .sta_sleep_auth = WL1271_PSM_ILLEGAL, 252 .sta_sleep_auth = WL1271_PSM_ILLEGAL,
253 .suspend_rx_ba_activity = 0,
253 }, 254 },
254 .itrim = { 255 .itrim = {
255 .enable = false, 256 .enable = false,
@@ -1728,6 +1729,9 @@ static struct wlcore_ops wl12xx_ops = {
1728 .convert_hwaddr = wl12xx_convert_hwaddr, 1729 .convert_hwaddr = wl12xx_convert_hwaddr,
1729 .lnk_high_prio = wl12xx_lnk_high_prio, 1730 .lnk_high_prio = wl12xx_lnk_high_prio,
1730 .lnk_low_prio = wl12xx_lnk_low_prio, 1731 .lnk_low_prio = wl12xx_lnk_low_prio,
1732 .interrupt_notify = NULL,
1733 .rx_ba_filter = NULL,
1734 .ap_sleep = NULL,
1731}; 1735};
1732 1736
1733static struct ieee80211_sta_ht_cap wl12xx_ht_cap = { 1737static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c
index a169bb5a5dbf..67f2a0eec854 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.c
+++ b/drivers/net/wireless/ti/wl18xx/acx.c
@@ -24,6 +24,7 @@
24#include "../wlcore/acx.h" 24#include "../wlcore/acx.h"
25 25
26#include "acx.h" 26#include "acx.h"
27#include "wl18xx.h"
27 28
28int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap, 29int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
29 u32 sdio_blk_size, u32 extra_mem_blks, 30 u32 sdio_blk_size, u32 extra_mem_blks,
@@ -194,3 +195,90 @@ out:
194 kfree(acx); 195 kfree(acx);
195 return ret; 196 return ret;
196} 197}
198
199/*
200 * When the host is suspended, we don't want to get any fast-link/PSM
201 * notifications
202 */
203int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl,
204 bool action)
205{
206 struct wl18xx_acx_interrupt_notify *acx;
207 int ret = 0;
208
209 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
210 if (!acx) {
211 ret = -ENOMEM;
212 goto out;
213 }
214
215 acx->enable = action;
216 ret = wl1271_cmd_configure(wl, ACX_INTERRUPT_NOTIFY, acx, sizeof(*acx));
217 if (ret < 0) {
218 wl1271_warning("acx interrupt notify setting failed: %d", ret);
219 goto out;
220 }
221
222out:
223 kfree(acx);
224 return ret;
225}
226
227/*
228 * When the host is suspended, we can configure the FW to disable RX BA
229 * notifications.
230 */
231int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action)
232{
233 struct wl18xx_acx_rx_ba_filter *acx;
234 int ret = 0;
235
236 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
237 if (!acx) {
238 ret = -ENOMEM;
239 goto out;
240 }
241
242 acx->enable = (u32)action;
243 ret = wl1271_cmd_configure(wl, ACX_RX_BA_FILTER, acx, sizeof(*acx));
244 if (ret < 0) {
245 wl1271_warning("acx rx ba activity filter setting failed: %d",
246 ret);
247 goto out;
248 }
249
250out:
251 kfree(acx);
252 return ret;
253}
254
255int wl18xx_acx_ap_sleep(struct wl1271 *wl)
256{
257 struct wl18xx_priv *priv = wl->priv;
258 struct acx_ap_sleep_cfg *acx;
259 struct conf_ap_sleep_settings *conf = &priv->conf.ap_sleep;
260 int ret;
261
262 wl1271_debug(DEBUG_ACX, "acx config ap sleep");
263
264 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
265 if (!acx) {
266 ret = -ENOMEM;
267 goto out;
268 }
269
270 acx->idle_duty_cycle = conf->idle_duty_cycle;
271 acx->connected_duty_cycle = conf->connected_duty_cycle;
272 acx->max_stations_thresh = conf->max_stations_thresh;
273 acx->idle_conn_thresh = conf->idle_conn_thresh;
274
275 ret = wl1271_cmd_configure(wl, ACX_AP_SLEEP_CFG, acx, sizeof(*acx));
276 if (ret < 0) {
277 wl1271_warning("acx config ap-sleep failed: %d", ret);
278 goto out;
279 }
280
281out:
282 kfree(acx);
283 return ret;
284}
diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h
index 0e636def1217..4afccd4b9467 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.h
+++ b/drivers/net/wireless/ti/wl18xx/acx.h
@@ -32,7 +32,10 @@ enum {
32 ACX_SIM_CONFIG = 0x0053, 32 ACX_SIM_CONFIG = 0x0053,
33 ACX_CLEAR_STATISTICS = 0x0054, 33 ACX_CLEAR_STATISTICS = 0x0054,
34 ACX_AUTO_RX_STREAMING = 0x0055, 34 ACX_AUTO_RX_STREAMING = 0x0055,
35 ACX_PEER_CAP = 0x0056 35 ACX_PEER_CAP = 0x0056,
36 ACX_INTERRUPT_NOTIFY = 0x0057,
37 ACX_RX_BA_FILTER = 0x0058,
38 ACX_AP_SLEEP_CFG = 0x0059
36}; 39};
37 40
38/* numbers of bits the length field takes (add 1 for the actual number) */ 41/* numbers of bits the length field takes (add 1 for the actual number) */
@@ -326,6 +329,44 @@ struct wlcore_acx_peer_cap {
326 u8 padding; 329 u8 padding;
327} __packed; 330} __packed;
328 331
332/*
333 * ACX_INTERRUPT_NOTIFY
334 * enable/disable fast-link/PSM notification from FW
335 */
336struct wl18xx_acx_interrupt_notify {
337 struct acx_header header;
338 u32 enable;
339};
340
341/*
342 * ACX_RX_BA_FILTER
343 * enable/disable RX BA filtering in FW
344 */
345struct wl18xx_acx_rx_ba_filter {
346 struct acx_header header;
347 u32 enable;
348};
349
350struct acx_ap_sleep_cfg {
351 struct acx_header header;
352 /* Duty Cycle (20-80% of staying Awake) for IDLE AP
353 * (0: disable)
354 */
355 u8 idle_duty_cycle;
356 /* Duty Cycle (20-80% of staying Awake) for Connected AP
357 * (0: disable)
358 */
359 u8 connected_duty_cycle;
360 /* Maximum stations that are allowed to be connected to AP
361 * (255: no limit)
362 */
363 u8 max_stations_thresh;
364 /* Timeout till enabling the Sleep Mechanism after data stops
365 * [unit: 100 msec]
366 */
367 u8 idle_conn_thresh;
368} __packed;
369
329int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap, 370int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
330 u32 sdio_blk_size, u32 extra_mem_blks, 371 u32 sdio_blk_size, u32 extra_mem_blks,
331 u32 len_field_size); 372 u32 len_field_size);
@@ -336,5 +377,8 @@ int wl18xx_acx_set_peer_cap(struct wl1271 *wl,
336 struct ieee80211_sta_ht_cap *ht_cap, 377 struct ieee80211_sta_ht_cap *ht_cap,
337 bool allow_ht_operation, 378 bool allow_ht_operation,
338 u32 rate_set, u8 hlid); 379 u32 rate_set, u8 hlid);
380int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl, bool action);
381int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action);
382int wl18xx_acx_ap_sleep(struct wl1271 *wl);
339 383
340#endif /* __WL18XX_ACX_H__ */ 384#endif /* __WL18XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.c b/drivers/net/wireless/ti/wl18xx/cmd.c
index 44f0b205b065..a8d176ddc73c 100644
--- a/drivers/net/wireless/ti/wl18xx/cmd.c
+++ b/drivers/net/wireless/ti/wl18xx/cmd.c
@@ -33,7 +33,8 @@ int wl18xx_cmd_channel_switch(struct wl1271 *wl,
33 u32 supported_rates; 33 u32 supported_rates;
34 int ret; 34 int ret;
35 35
36 wl1271_debug(DEBUG_ACX, "cmd channel switch"); 36 wl1271_debug(DEBUG_ACX, "cmd channel switch (count=%d)",
37 ch_switch->count);
37 38
38 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 39 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
39 if (!cmd) { 40 if (!cmd) {
@@ -60,8 +61,12 @@ int wl18xx_cmd_channel_switch(struct wl1271 *wl,
60 goto out_free; 61 goto out_free;
61 } 62 }
62 63
63 supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES | 64 supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES;
64 wlcore_hw_sta_get_ap_rate_mask(wl, wlvif); 65 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
66 supported_rates |= wlcore_hw_sta_get_ap_rate_mask(wl, wlvif);
67 else
68 supported_rates |=
69 wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
65 if (wlvif->p2p) 70 if (wlvif->p2p)
66 supported_rates &= ~CONF_TX_CCK_RATES; 71 supported_rates &= ~CONF_TX_CCK_RATES;
67 cmd->local_supported_rates = cpu_to_le32(supported_rates); 72 cmd->local_supported_rates = cpu_to_le32(supported_rates);
@@ -167,3 +172,85 @@ out_free:
167out: 172out:
168 return ret; 173 return ret;
169} 174}
175
176int wl18xx_cmd_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start)
177{
178 struct wlcore_cmd_cac_start *cmd;
179 int ret = 0;
180
181 wl1271_debug(DEBUG_CMD, "cmd cac (channel %d) %s",
182 wlvif->channel, start ? "start" : "stop");
183
184 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
185 if (!cmd)
186 return -ENOMEM;
187
188 cmd->role_id = wlvif->role_id;
189 cmd->channel = wlvif->channel;
190 if (wlvif->band == IEEE80211_BAND_5GHZ)
191 cmd->band = WLCORE_BAND_5GHZ;
192 cmd->bandwidth = wlcore_get_native_channel_type(wlvif->channel_type);
193
194 ret = wl1271_cmd_send(wl,
195 start ? CMD_CAC_START : CMD_CAC_STOP,
196 cmd, sizeof(*cmd), 0);
197 if (ret < 0) {
198 wl1271_error("failed to send cac command");
199 goto out_free;
200 }
201
202out_free:
203 kfree(cmd);
204 return ret;
205}
206
207int wl18xx_cmd_radar_detection_debug(struct wl1271 *wl, u8 channel)
208{
209 struct wl18xx_cmd_dfs_radar_debug *cmd;
210 int ret = 0;
211
212 wl1271_debug(DEBUG_CMD, "cmd radar detection debug (chan %d)",
213 channel);
214
215 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
216 if (!cmd)
217 return -ENOMEM;
218
219 cmd->channel = channel;
220
221 ret = wl1271_cmd_send(wl, CMD_DFS_RADAR_DETECTION_DEBUG,
222 cmd, sizeof(*cmd), 0);
223 if (ret < 0) {
224 wl1271_error("failed to send radar detection debug command");
225 goto out_free;
226 }
227
228out_free:
229 kfree(cmd);
230 return ret;
231}
232
233int wl18xx_cmd_dfs_master_restart(struct wl1271 *wl, struct wl12xx_vif *wlvif)
234{
235 struct wl18xx_cmd_dfs_master_restart *cmd;
236 int ret = 0;
237
238 wl1271_debug(DEBUG_CMD, "cmd dfs master restart (role %d)",
239 wlvif->role_id);
240
241 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
242 if (!cmd)
243 return -ENOMEM;
244
245 cmd->role_id = wlvif->role_id;
246
247 ret = wl1271_cmd_send(wl, CMD_DFS_MASTER_RESTART,
248 cmd, sizeof(*cmd), 0);
249 if (ret < 0) {
250 wl1271_error("failed to send dfs master restart command");
251 goto out_free;
252 }
253out_free:
254 kfree(cmd);
255 return ret;
256}
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.h b/drivers/net/wireless/ti/wl18xx/cmd.h
index 92499e2dfa83..7f9440a2bff8 100644
--- a/drivers/net/wireless/ti/wl18xx/cmd.h
+++ b/drivers/net/wireless/ti/wl18xx/cmd.h
@@ -59,6 +59,30 @@ struct wl18xx_cmd_smart_config_set_group_key {
59 u8 key[16]; 59 u8 key[16];
60} __packed; 60} __packed;
61 61
62struct wl18xx_cmd_dfs_radar_debug {
63 struct wl1271_cmd_header header;
64
65 u8 channel;
66 u8 padding[3];
67} __packed;
68
69struct wl18xx_cmd_dfs_master_restart {
70 struct wl1271_cmd_header header;
71
72 u8 role_id;
73 u8 padding[3];
74} __packed;
75
76/* cac_start and cac_stop share the same params */
77struct wlcore_cmd_cac_start {
78 struct wl1271_cmd_header header;
79
80 u8 role_id;
81 u8 channel;
82 u8 band;
83 u8 bandwidth;
84} __packed;
85
62int wl18xx_cmd_channel_switch(struct wl1271 *wl, 86int wl18xx_cmd_channel_switch(struct wl1271 *wl,
63 struct wl12xx_vif *wlvif, 87 struct wl12xx_vif *wlvif,
64 struct ieee80211_channel_switch *ch_switch); 88 struct ieee80211_channel_switch *ch_switch);
@@ -66,4 +90,7 @@ int wl18xx_cmd_smart_config_start(struct wl1271 *wl, u32 group_bitmap);
66int wl18xx_cmd_smart_config_stop(struct wl1271 *wl); 90int wl18xx_cmd_smart_config_stop(struct wl1271 *wl);
67int wl18xx_cmd_smart_config_set_group_key(struct wl1271 *wl, u16 group_id, 91int wl18xx_cmd_smart_config_set_group_key(struct wl1271 *wl, u16 group_id,
68 u8 key_len, u8 *key); 92 u8 key_len, u8 *key);
93int wl18xx_cmd_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start);
94int wl18xx_cmd_radar_detection_debug(struct wl1271 *wl, u8 channel);
95int wl18xx_cmd_dfs_master_restart(struct wl1271 *wl, struct wl12xx_vif *wlvif);
69#endif 96#endif
diff --git a/drivers/net/wireless/ti/wl18xx/conf.h b/drivers/net/wireless/ti/wl18xx/conf.h
index e34302e3b51d..71f1ec448ba5 100644
--- a/drivers/net/wireless/ti/wl18xx/conf.h
+++ b/drivers/net/wireless/ti/wl18xx/conf.h
@@ -23,7 +23,7 @@
23#define __WL18XX_CONF_H__ 23#define __WL18XX_CONF_H__
24 24
25#define WL18XX_CONF_MAGIC 0x10e100ca 25#define WL18XX_CONF_MAGIC 0x10e100ca
26#define WL18XX_CONF_VERSION (WLCORE_CONF_VERSION | 0x0006) 26#define WL18XX_CONF_VERSION (WLCORE_CONF_VERSION | 0x0007)
27#define WL18XX_CONF_MASK 0x0000ffff 27#define WL18XX_CONF_MASK 0x0000ffff
28#define WL18XX_CONF_SIZE (WLCORE_CONF_SIZE + \ 28#define WL18XX_CONF_SIZE (WLCORE_CONF_SIZE + \
29 sizeof(struct wl18xx_priv_conf)) 29 sizeof(struct wl18xx_priv_conf))
@@ -110,12 +110,33 @@ struct wl18xx_ht_settings {
110 u8 mode; 110 u8 mode;
111} __packed; 111} __packed;
112 112
113struct conf_ap_sleep_settings {
114 /* Duty Cycle (20-80% of staying Awake) for IDLE AP
115 * (0: disable)
116 */
117 u8 idle_duty_cycle;
118 /* Duty Cycle (20-80% of staying Awake) for Connected AP
119 * (0: disable)
120 */
121 u8 connected_duty_cycle;
122 /* Maximum stations that are allowed to be connected to AP
123 * (255: no limit)
124 */
125 u8 max_stations_thresh;
126 /* Timeout till enabling the Sleep Mechanism after data stops
127 * [unit: 100 msec]
128 */
129 u8 idle_conn_thresh;
130} __packed;
131
113struct wl18xx_priv_conf { 132struct wl18xx_priv_conf {
114 /* Module params structures */ 133 /* Module params structures */
115 struct wl18xx_ht_settings ht; 134 struct wl18xx_ht_settings ht;
116 135
117 /* this structure is copied wholesale to FW */ 136 /* this structure is copied wholesale to FW */
118 struct wl18xx_mac_and_phy_params phy; 137 struct wl18xx_mac_and_phy_params phy;
138
139 struct conf_ap_sleep_settings ap_sleep;
119} __packed; 140} __packed;
120 141
121#endif /* __WL18XX_CONF_H__ */ 142#endif /* __WL18XX_CONF_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 7f1669cdea09..c93fae95baac 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -22,9 +22,12 @@
22 22
23#include "../wlcore/debugfs.h" 23#include "../wlcore/debugfs.h"
24#include "../wlcore/wlcore.h" 24#include "../wlcore/wlcore.h"
25#include "../wlcore/debug.h"
26#include "../wlcore/ps.h"
25 27
26#include "wl18xx.h" 28#include "wl18xx.h"
27#include "acx.h" 29#include "acx.h"
30#include "cmd.h"
28#include "debugfs.h" 31#include "debugfs.h"
29 32
30#define WL18XX_DEBUGFS_FWSTATS_FILE(a, b, c) \ 33#define WL18XX_DEBUGFS_FWSTATS_FILE(a, b, c) \
@@ -239,6 +242,45 @@ static const struct file_operations clear_fw_stats_ops = {
239 .llseek = default_llseek, 242 .llseek = default_llseek,
240}; 243};
241 244
245static ssize_t radar_detection_write(struct file *file,
246 const char __user *user_buf,
247 size_t count, loff_t *ppos)
248{
249 struct wl1271 *wl = file->private_data;
250 int ret;
251 u8 channel;
252
253 ret = kstrtou8_from_user(user_buf, count, 10, &channel);
254 if (ret < 0) {
255 wl1271_warning("illegal channel");
256 return -EINVAL;
257 }
258
259 mutex_lock(&wl->mutex);
260
261 if (unlikely(wl->state != WLCORE_STATE_ON))
262 goto out;
263
264 ret = wl1271_ps_elp_wakeup(wl);
265 if (ret < 0)
266 goto out;
267
268 ret = wl18xx_cmd_radar_detection_debug(wl, channel);
269 if (ret < 0)
270 count = ret;
271
272 wl1271_ps_elp_sleep(wl);
273out:
274 mutex_unlock(&wl->mutex);
275 return count;
276}
277
278static const struct file_operations radar_detection_ops = {
279 .write = radar_detection_write,
280 .open = simple_open,
281 .llseek = default_llseek,
282};
283
242int wl18xx_debugfs_add_files(struct wl1271 *wl, 284int wl18xx_debugfs_add_files(struct wl1271 *wl,
243 struct dentry *rootdir) 285 struct dentry *rootdir)
244{ 286{
@@ -390,6 +432,7 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
390 DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks); 432 DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks);
391 433
392 DEBUGFS_ADD(conf, moddir); 434 DEBUGFS_ADD(conf, moddir);
435 DEBUGFS_ADD(radar_detection, moddir);
393 436
394 return 0; 437 return 0;
395 438
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index eb1848e08424..c28f06854195 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -47,6 +47,19 @@ int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
47 return wlcore_cmd_wait_for_event_or_timeout(wl, local_event, timeout); 47 return wlcore_cmd_wait_for_event_or_timeout(wl, local_event, timeout);
48} 48}
49 49
50static const char *wl18xx_radar_type_decode(u8 radar_type)
51{
52 switch (radar_type) {
53 case RADAR_TYPE_REGULAR:
54 return "REGULAR";
55 case RADAR_TYPE_CHIRP:
56 return "CHIRP";
57 case RADAR_TYPE_NONE:
58 default:
59 return "N/A";
60 }
61}
62
50static int wlcore_smart_config_sync_event(struct wl1271 *wl, u8 sync_channel, 63static int wlcore_smart_config_sync_event(struct wl1271 *wl, u8 sync_channel,
51 u8 sync_band) 64 u8 sync_band)
52{ 65{
@@ -115,6 +128,14 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
115 wl18xx_scan_completed(wl, wl->scan_wlvif); 128 wl18xx_scan_completed(wl, wl->scan_wlvif);
116 } 129 }
117 130
131 if (vector & RADAR_DETECTED_EVENT_ID) {
132 wl1271_info("radar event: channel %d type %s",
133 mbox->radar_channel,
134 wl18xx_radar_type_decode(mbox->radar_type));
135
136 ieee80211_radar_detected(wl->hw);
137 }
138
118 if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) { 139 if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
119 wl1271_debug(DEBUG_EVENT, 140 wl1271_debug(DEBUG_EVENT,
120 "PERIODIC_SCAN_REPORT_EVENT (results %d)", 141 "PERIODIC_SCAN_REPORT_EVENT (results %d)",
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index 0680312d4943..266ee87834e4 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -42,6 +42,12 @@ enum {
42 SMART_CONFIG_DECODE_EVENT_ID = BIT(23), 42 SMART_CONFIG_DECODE_EVENT_ID = BIT(23),
43}; 43};
44 44
45enum wl18xx_radar_types {
46 RADAR_TYPE_NONE,
47 RADAR_TYPE_REGULAR,
48 RADAR_TYPE_CHIRP
49};
50
45struct wl18xx_event_mailbox { 51struct wl18xx_event_mailbox {
46 __le32 events_vector; 52 __le32 events_vector;
47 53
@@ -83,13 +89,19 @@ struct wl18xx_event_mailbox {
83 u8 sc_token_len; 89 u8 sc_token_len;
84 u8 padding1; 90 u8 padding1;
85 u8 sc_ssid[32]; 91 u8 sc_ssid[32];
86 u8 sc_pwd[32]; 92 u8 sc_pwd[64];
87 u8 sc_token[32]; 93 u8 sc_token[32];
88 94
89 /* smart config sync channel */ 95 /* smart config sync channel */
90 u8 sc_sync_channel; 96 u8 sc_sync_channel;
91 u8 sc_sync_band; 97 u8 sc_sync_band;
92 u8 padding2[2]; 98 u8 padding2[2];
99
100 /* radar detect */
101 u8 radar_channel;
102 u8 radar_type;
103
104 u8 padding3[2];
93} __packed; 105} __packed;
94 106
95int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, 107int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 8e562610bf16..717c4f5a02c2 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -378,6 +378,7 @@ static struct wlcore_conf wl18xx_conf = {
378 .keep_alive_interval = 55000, 378 .keep_alive_interval = 55000,
379 .max_listen_interval = 20, 379 .max_listen_interval = 20,
380 .sta_sleep_auth = WL1271_PSM_ILLEGAL, 380 .sta_sleep_auth = WL1271_PSM_ILLEGAL,
381 .suspend_rx_ba_activity = 0,
381 }, 382 },
382 .itrim = { 383 .itrim = {
383 .enable = false, 384 .enable = false,
@@ -567,6 +568,12 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
567 .high_power_val_2nd = 0xff, 568 .high_power_val_2nd = 0xff,
568 .tx_rf_margin = 1, 569 .tx_rf_margin = 1,
569 }, 570 },
571 .ap_sleep = { /* disabled by default */
572 .idle_duty_cycle = 0,
573 .connected_duty_cycle = 0,
574 .max_stations_thresh = 0,
575 .idle_conn_thresh = 0,
576 },
570}; 577};
571 578
572static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = { 579static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
@@ -648,7 +655,7 @@ static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
648}; 655};
649 656
650/* TODO: maybe move to a new header file? */ 657/* TODO: maybe move to a new header file? */
651#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-3.bin" 658#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-4.bin"
652 659
653static int wl18xx_identify_chip(struct wl1271 *wl) 660static int wl18xx_identify_chip(struct wl1271 *wl)
654{ 661{
@@ -983,6 +990,7 @@ static int wl18xx_boot(struct wl1271 *wl)
983 990
984 wl->event_mask = BSS_LOSS_EVENT_ID | 991 wl->event_mask = BSS_LOSS_EVENT_ID |
985 SCAN_COMPLETE_EVENT_ID | 992 SCAN_COMPLETE_EVENT_ID |
993 RADAR_DETECTED_EVENT_ID |
986 RSSI_SNR_TRIGGER_0_EVENT_ID | 994 RSSI_SNR_TRIGGER_0_EVENT_ID |
987 PERIODIC_SCAN_COMPLETE_EVENT_ID | 995 PERIODIC_SCAN_COMPLETE_EVENT_ID |
988 PERIODIC_SCAN_REPORT_EVENT_ID | 996 PERIODIC_SCAN_REPORT_EVENT_ID |
@@ -1559,26 +1567,19 @@ static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
1559} 1567}
1560 1568
1561static void wl18xx_sta_rc_update(struct wl1271 *wl, 1569static void wl18xx_sta_rc_update(struct wl1271 *wl,
1562 struct wl12xx_vif *wlvif, 1570 struct wl12xx_vif *wlvif)
1563 struct ieee80211_sta *sta,
1564 u32 changed)
1565{ 1571{
1566 bool wide = sta->bandwidth >= IEEE80211_STA_RX_BW_40; 1572 bool wide = wlvif->rc_update_bw >= IEEE80211_STA_RX_BW_40;
1567 1573
1568 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update wide %d", wide); 1574 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update wide %d", wide);
1569 1575
1570 if (!(changed & IEEE80211_RC_BW_CHANGED))
1571 return;
1572
1573 mutex_lock(&wl->mutex);
1574
1575 /* sanity */ 1576 /* sanity */
1576 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS)) 1577 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
1577 goto out; 1578 return;
1578 1579
1579 /* ignore the change before association */ 1580 /* ignore the change before association */
1580 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 1581 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1581 goto out; 1582 return;
1582 1583
1583 /* 1584 /*
1584 * If we started out as wide, we can change the operation mode. If we 1585 * If we started out as wide, we can change the operation mode. If we
@@ -1589,9 +1590,6 @@ static void wl18xx_sta_rc_update(struct wl1271 *wl,
1589 wl18xx_acx_peer_ht_operation_mode(wl, wlvif->sta.hlid, wide); 1590 wl18xx_acx_peer_ht_operation_mode(wl, wlvif->sta.hlid, wide);
1590 else 1591 else
1591 ieee80211_connection_loss(wl12xx_wlvif_to_vif(wlvif)); 1592 ieee80211_connection_loss(wl12xx_wlvif_to_vif(wlvif));
1592
1593out:
1594 mutex_unlock(&wl->mutex);
1595} 1593}
1596 1594
1597static int wl18xx_set_peer_cap(struct wl1271 *wl, 1595static int wl18xx_set_peer_cap(struct wl1271 *wl,
@@ -1703,6 +1701,11 @@ static struct wlcore_ops wl18xx_ops = {
1703 .smart_config_start = wl18xx_cmd_smart_config_start, 1701 .smart_config_start = wl18xx_cmd_smart_config_start,
1704 .smart_config_stop = wl18xx_cmd_smart_config_stop, 1702 .smart_config_stop = wl18xx_cmd_smart_config_stop,
1705 .smart_config_set_group_key = wl18xx_cmd_smart_config_set_group_key, 1703 .smart_config_set_group_key = wl18xx_cmd_smart_config_set_group_key,
1704 .interrupt_notify = wl18xx_acx_interrupt_notify_config,
1705 .rx_ba_filter = wl18xx_acx_rx_ba_filter,
1706 .ap_sleep = wl18xx_acx_ap_sleep,
1707 .set_cac = wl18xx_cmd_set_cac,
1708 .dfs_master_restart = wl18xx_cmd_dfs_master_restart,
1706}; 1709};
1707 1710
1708/* HT cap appropriate for wide channels in 2Ghz */ 1711/* HT cap appropriate for wide channels in 2Ghz */
@@ -1796,6 +1799,10 @@ wl18xx_iface_combinations[] = {
1796 .limits = wl18xx_iface_ap_limits, 1799 .limits = wl18xx_iface_ap_limits,
1797 .n_limits = ARRAY_SIZE(wl18xx_iface_ap_limits), 1800 .n_limits = ARRAY_SIZE(wl18xx_iface_ap_limits),
1798 .num_different_channels = 1, 1801 .num_different_channels = 1,
1802 .radar_detect_widths = BIT(NL80211_CHAN_NO_HT) |
1803 BIT(NL80211_CHAN_HT20) |
1804 BIT(NL80211_CHAN_HT40MINUS) |
1805 BIT(NL80211_CHAN_HT40PLUS),
1799 } 1806 }
1800}; 1807};
1801 1808
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index 6a2b88030c1d..71e9e382ce80 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -26,10 +26,10 @@
26 26
27/* minimum FW required for driver */ 27/* minimum FW required for driver */
28#define WL18XX_CHIP_VER 8 28#define WL18XX_CHIP_VER 8
29#define WL18XX_IFTYPE_VER 8 29#define WL18XX_IFTYPE_VER 9
30#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE 30#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE
31#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE 31#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE
32#define WL18XX_MINOR_VER 13 32#define WL18XX_MINOR_VER 11
33 33
34#define WL18XX_CMD_MAX_SIZE 740 34#define WL18XX_CMD_MAX_SIZE 740
35 35
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index b924ceadc02c..f28fa3b5029d 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -1725,7 +1725,7 @@ int wl12xx_acx_config_hangover(struct wl1271 *wl)
1725 acx->decrease_delta = conf->decrease_delta; 1725 acx->decrease_delta = conf->decrease_delta;
1726 acx->quiet_time = conf->quiet_time; 1726 acx->quiet_time = conf->quiet_time;
1727 acx->increase_time = conf->increase_time; 1727 acx->increase_time = conf->increase_time;
1728 acx->window_size = acx->window_size; 1728 acx->window_size = conf->window_size;
1729 1729
1730 ret = wl1271_cmd_configure(wl, ACX_CONFIG_HANGOVER, acx, 1730 ret = wl1271_cmd_configure(wl, ACX_CONFIG_HANGOVER, acx,
1731 sizeof(*acx)); 1731 sizeof(*acx));
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index b82661962d33..c26fc2106e5b 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -403,7 +403,7 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
403 WARN_ON_ONCE(wl->active_link_count < 0); 403 WARN_ON_ONCE(wl->active_link_count < 0);
404} 404}
405 405
406static u8 wlcore_get_native_channel_type(u8 nl_channel_type) 406u8 wlcore_get_native_channel_type(u8 nl_channel_type)
407{ 407{
408 switch (nl_channel_type) { 408 switch (nl_channel_type) {
409 case NL80211_CHAN_NO_HT: 409 case NL80211_CHAN_NO_HT:
@@ -419,6 +419,7 @@ static u8 wlcore_get_native_channel_type(u8 nl_channel_type)
419 return WLCORE_CHAN_NO_HT; 419 return WLCORE_CHAN_NO_HT;
420 } 420 }
421} 421}
422EXPORT_SYMBOL_GPL(wlcore_get_native_channel_type);
422 423
423static int wl12xx_cmd_role_start_dev(struct wl1271 *wl, 424static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
424 struct wl12xx_vif *wlvif, 425 struct wl12xx_vif *wlvif,
@@ -1686,9 +1687,7 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
1686{ 1687{
1687 struct wl12xx_cmd_regdomain_dfs_config *cmd = NULL; 1688 struct wl12xx_cmd_regdomain_dfs_config *cmd = NULL;
1688 int ret = 0, i, b, ch_bit_idx; 1689 int ret = 0, i, b, ch_bit_idx;
1689 struct ieee80211_channel *channel;
1690 u32 tmp_ch_bitmap[2]; 1690 u32 tmp_ch_bitmap[2];
1691 u16 ch;
1692 struct wiphy *wiphy = wl->hw->wiphy; 1691 struct wiphy *wiphy = wl->hw->wiphy;
1693 struct ieee80211_supported_band *band; 1692 struct ieee80211_supported_band *band;
1694 bool timeout = false; 1693 bool timeout = false;
@@ -1703,12 +1702,16 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
1703 for (b = IEEE80211_BAND_2GHZ; b <= IEEE80211_BAND_5GHZ; b++) { 1702 for (b = IEEE80211_BAND_2GHZ; b <= IEEE80211_BAND_5GHZ; b++) {
1704 band = wiphy->bands[b]; 1703 band = wiphy->bands[b];
1705 for (i = 0; i < band->n_channels; i++) { 1704 for (i = 0; i < band->n_channels; i++) {
1706 channel = &band->channels[i]; 1705 struct ieee80211_channel *channel = &band->channels[i];
1707 ch = channel->hw_value; 1706 u16 ch = channel->hw_value;
1707 u32 flags = channel->flags;
1708 1708
1709 if (channel->flags & (IEEE80211_CHAN_DISABLED | 1709 if (flags & (IEEE80211_CHAN_DISABLED |
1710 IEEE80211_CHAN_RADAR | 1710 IEEE80211_CHAN_NO_IR))
1711 IEEE80211_CHAN_NO_IR)) 1711 continue;
1712
1713 if ((flags & IEEE80211_CHAN_RADAR) &&
1714 channel->dfs_state != NL80211_DFS_AVAILABLE)
1712 continue; 1715 continue;
1713 1716
1714 ch_bit_idx = wlcore_get_reg_conf_ch_idx(b, ch); 1717 ch_bit_idx = wlcore_get_reg_conf_ch_idx(b, ch);
@@ -1733,6 +1736,7 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
1733 1736
1734 cmd->ch_bit_map1 = cpu_to_le32(tmp_ch_bitmap[0]); 1737 cmd->ch_bit_map1 = cpu_to_le32(tmp_ch_bitmap[0]);
1735 cmd->ch_bit_map2 = cpu_to_le32(tmp_ch_bitmap[1]); 1738 cmd->ch_bit_map2 = cpu_to_le32(tmp_ch_bitmap[1]);
1739 cmd->dfs_region = wl->dfs_region;
1736 1740
1737 wl1271_debug(DEBUG_CMD, 1741 wl1271_debug(DEBUG_CMD,
1738 "cmd reg domain bitmap1: 0x%08x, bitmap2: 0x%08x", 1742 "cmd reg domain bitmap1: 0x%08x, bitmap2: 0x%08x",
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 453684a71d30..e14cd407a6ae 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -105,6 +105,7 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
105void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid); 105void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid);
106int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, 106int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
107 u32 mask, bool *timeout); 107 u32 mask, bool *timeout);
108u8 wlcore_get_native_channel_type(u8 nl_channel_type);
108 109
109enum wl1271_commands { 110enum wl1271_commands {
110 CMD_INTERROGATE = 1, /* use this to read information elements */ 111 CMD_INTERROGATE = 1, /* use this to read information elements */
@@ -172,6 +173,11 @@ enum wl1271_commands {
172 CMD_SMART_CONFIG_STOP = 62, 173 CMD_SMART_CONFIG_STOP = 62,
173 CMD_SMART_CONFIG_SET_GROUP_KEY = 63, 174 CMD_SMART_CONFIG_SET_GROUP_KEY = 63,
174 175
176 CMD_CAC_START = 64,
177 CMD_CAC_STOP = 65,
178 CMD_DFS_MASTER_RESTART = 66,
179 CMD_DFS_RADAR_DETECTION_DEBUG = 67,
180
175 MAX_COMMAND_ID = 0xFFFF, 181 MAX_COMMAND_ID = 0xFFFF,
176}; 182};
177 183
@@ -642,6 +648,8 @@ struct wl12xx_cmd_regdomain_dfs_config {
642 648
643 __le32 ch_bit_map1; 649 __le32 ch_bit_map1;
644 __le32 ch_bit_map2; 650 __le32 ch_bit_map2;
651 u8 dfs_region;
652 u8 padding[3];
645} __packed; 653} __packed;
646 654
647struct wl12xx_cmd_config_fwlog { 655struct wl12xx_cmd_config_fwlog {
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 40995c42bef8..166add00b50f 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -997,6 +997,11 @@ struct conf_conn_settings {
997 * whether we can go to ELP. 997 * whether we can go to ELP.
998 */ 998 */
999 u8 sta_sleep_auth; 999 u8 sta_sleep_auth;
1000
1001 /*
1002 * Default RX BA Activity filter configuration
1003 */
1004 u8 suspend_rx_ba_activity;
1000} __packed; 1005} __packed;
1001 1006
1002enum { 1007enum {
@@ -1347,7 +1352,7 @@ struct conf_recovery_settings {
1347 * version, the two LSB are the lower driver's private conf 1352 * version, the two LSB are the lower driver's private conf
1348 * version. 1353 * version.
1349 */ 1354 */
1350#define WLCORE_CONF_VERSION (0x0005 << 16) 1355#define WLCORE_CONF_VERSION (0x0006 << 16)
1351#define WLCORE_CONF_MASK 0xffff0000 1356#define WLCORE_CONF_MASK 0xffff0000
1352#define WLCORE_CONF_SIZE (sizeof(struct wlcore_conf_header) + \ 1357#define WLCORE_CONF_SIZE (sizeof(struct wlcore_conf_header) + \
1353 sizeof(struct wlcore_conf)) 1358 sizeof(struct wlcore_conf))
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index 0be21f62fcb0..68f3bf229b5a 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -929,17 +929,10 @@ static ssize_t beacon_filtering_write(struct file *file,
929{ 929{
930 struct wl1271 *wl = file->private_data; 930 struct wl1271 *wl = file->private_data;
931 struct wl12xx_vif *wlvif; 931 struct wl12xx_vif *wlvif;
932 char buf[10];
933 size_t len;
934 unsigned long value; 932 unsigned long value;
935 int ret; 933 int ret;
936 934
937 len = min(count, sizeof(buf) - 1); 935 ret = kstrtoul_from_user(user_buf, count, 0, &value);
938 if (copy_from_user(buf, user_buf, len))
939 return -EFAULT;
940 buf[len] = '\0';
941
942 ret = kstrtoul(buf, 0, &value);
943 if (ret < 0) { 936 if (ret < 0) {
944 wl1271_warning("illegal value for beacon_filtering!"); 937 wl1271_warning("illegal value for beacon_filtering!");
945 return -EINVAL; 938 return -EINVAL;
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 5153640f4532..c42e78955e7b 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -139,7 +139,7 @@ void wlcore_event_channel_switch(struct wl1271 *wl,
139 wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d", 139 wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d",
140 __func__, roles_bitmap, success); 140 __func__, roles_bitmap, success);
141 141
142 wl12xx_for_each_wlvif_sta(wl, wlvif) { 142 wl12xx_for_each_wlvif(wl, wlvif) {
143 if (wlvif->role_id == WL12XX_INVALID_ROLE_ID || 143 if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
144 !test_bit(wlvif->role_id , &roles_bitmap)) 144 !test_bit(wlvif->role_id , &roles_bitmap))
145 continue; 145 continue;
@@ -150,8 +150,13 @@ void wlcore_event_channel_switch(struct wl1271 *wl,
150 150
151 vif = wl12xx_wlvif_to_vif(wlvif); 151 vif = wl12xx_wlvif_to_vif(wlvif);
152 152
153 ieee80211_chswitch_done(vif, success); 153 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
154 cancel_delayed_work(&wlvif->channel_switch_work); 154 ieee80211_chswitch_done(vif, success);
155 cancel_delayed_work(&wlvif->channel_switch_work);
156 } else {
157 set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags);
158 ieee80211_csa_finish(vif);
159 }
155 } 160 }
156} 161}
157EXPORT_SYMBOL_GPL(wlcore_event_channel_switch); 162EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index aa9f82c72296..eec56935b1b6 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -211,11 +211,35 @@ wlcore_hw_pre_pkt_send(struct wl1271 *wl, u32 buf_offset, u32 last_len)
211} 211}
212 212
213static inline void 213static inline void
214wlcore_hw_sta_rc_update(struct wl1271 *wl, struct wl12xx_vif *wlvif, 214wlcore_hw_sta_rc_update(struct wl1271 *wl, struct wl12xx_vif *wlvif)
215 struct ieee80211_sta *sta, u32 changed)
216{ 215{
217 if (wl->ops->sta_rc_update) 216 if (wl->ops->sta_rc_update)
218 wl->ops->sta_rc_update(wl, wlvif, sta, changed); 217 wl->ops->sta_rc_update(wl, wlvif);
218}
219
220static inline int
221wlcore_hw_interrupt_notify(struct wl1271 *wl, bool action)
222{
223 if (wl->ops->interrupt_notify)
224 return wl->ops->interrupt_notify(wl, action);
225 return 0;
226}
227
228static inline int
229wlcore_hw_rx_ba_filter(struct wl1271 *wl, bool action)
230{
231 if (wl->ops->rx_ba_filter)
232 return wl->ops->rx_ba_filter(wl, action);
233 return 0;
234}
235
236static inline int
237wlcore_hw_ap_sleep(struct wl1271 *wl)
238{
239 if (wl->ops->ap_sleep)
240 return wl->ops->ap_sleep(wl);
241
242 return 0;
219} 243}
220 244
221static inline int 245static inline int
@@ -287,4 +311,22 @@ wlcore_smart_config_set_group_key(struct wl1271 *wl, u16 group_id,
287 311
288 return wl->ops->smart_config_set_group_key(wl, group_id, key_len, key); 312 return wl->ops->smart_config_set_group_key(wl, group_id, key_len, key);
289} 313}
314
315static inline int
316wlcore_hw_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start)
317{
318 if (!wl->ops->set_cac)
319 return -EINVAL;
320
321 return wl->ops->set_cac(wl, wlvif, start);
322}
323
324static inline int
325wlcore_hw_dfs_master_restart(struct wl1271 *wl, struct wl12xx_vif *wlvif)
326{
327 if (!wl->ops->dfs_master_restart)
328 return -EINVAL;
329
330 return wl->ops->dfs_master_restart(wl, wlvif);
331}
290#endif 332#endif
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 199e94120864..5ca1fb161a50 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -392,6 +392,11 @@ static int wl1271_ap_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
392 if (ret < 0) 392 if (ret < 0)
393 return ret; 393 return ret;
394 394
395 /* configure AP sleep, if enabled */
396 ret = wlcore_hw_ap_sleep(wl);
397 if (ret < 0)
398 return ret;
399
395 return 0; 400 return 0;
396} 401}
397 402
@@ -567,8 +572,7 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
567 /* consider all existing roles before configuring psm. */ 572 /* consider all existing roles before configuring psm. */
568 573
569 if (wl->ap_count == 0 && is_ap) { /* first AP */ 574 if (wl->ap_count == 0 && is_ap) { /* first AP */
570 /* Configure for power always on */ 575 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
571 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
572 if (ret < 0) 576 if (ret < 0)
573 return ret; 577 return ret;
574 578
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 6ad3fcedab9b..1e136993580f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -79,22 +79,12 @@ static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
79static void wl1271_reg_notify(struct wiphy *wiphy, 79static void wl1271_reg_notify(struct wiphy *wiphy,
80 struct regulatory_request *request) 80 struct regulatory_request *request)
81{ 81{
82 struct ieee80211_supported_band *band;
83 struct ieee80211_channel *ch;
84 int i;
85 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 82 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
86 struct wl1271 *wl = hw->priv; 83 struct wl1271 *wl = hw->priv;
87 84
88 band = wiphy->bands[IEEE80211_BAND_5GHZ]; 85 /* copy the current dfs region */
89 for (i = 0; i < band->n_channels; i++) { 86 if (request)
90 ch = &band->channels[i]; 87 wl->dfs_region = request->dfs_region;
91 if (ch->flags & IEEE80211_CHAN_DISABLED)
92 continue;
93
94 if (ch->flags & IEEE80211_CHAN_RADAR)
95 ch->flags |= IEEE80211_CHAN_NO_IR;
96
97 }
98 88
99 wlcore_regdomain_config(wl); 89 wlcore_regdomain_config(wl);
100} 90}
@@ -226,6 +216,29 @@ void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
226 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout)); 216 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
227} 217}
228 218
219static void wlcore_rc_update_work(struct work_struct *work)
220{
221 int ret;
222 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
223 rc_update_work);
224 struct wl1271 *wl = wlvif->wl;
225
226 mutex_lock(&wl->mutex);
227
228 if (unlikely(wl->state != WLCORE_STATE_ON))
229 goto out;
230
231 ret = wl1271_ps_elp_wakeup(wl);
232 if (ret < 0)
233 goto out;
234
235 wlcore_hw_sta_rc_update(wl, wlvif);
236
237 wl1271_ps_elp_sleep(wl);
238out:
239 mutex_unlock(&wl->mutex);
240}
241
229static void wl12xx_tx_watchdog_work(struct work_struct *work) 242static void wl12xx_tx_watchdog_work(struct work_struct *work)
230{ 243{
231 struct delayed_work *dwork; 244 struct delayed_work *dwork;
@@ -1662,19 +1675,15 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1662 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 1675 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1663 goto out; 1676 goto out;
1664 1677
1665 ret = wl1271_ps_elp_wakeup(wl);
1666 if (ret < 0)
1667 goto out;
1668
1669 ret = wl1271_configure_wowlan(wl, wow); 1678 ret = wl1271_configure_wowlan(wl, wow);
1670 if (ret < 0) 1679 if (ret < 0)
1671 goto out_sleep; 1680 goto out;
1672 1681
1673 if ((wl->conf.conn.suspend_wake_up_event == 1682 if ((wl->conf.conn.suspend_wake_up_event ==
1674 wl->conf.conn.wake_up_event) && 1683 wl->conf.conn.wake_up_event) &&
1675 (wl->conf.conn.suspend_listen_interval == 1684 (wl->conf.conn.suspend_listen_interval ==
1676 wl->conf.conn.listen_interval)) 1685 wl->conf.conn.listen_interval))
1677 goto out_sleep; 1686 goto out;
1678 1687
1679 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1688 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1680 wl->conf.conn.suspend_wake_up_event, 1689 wl->conf.conn.suspend_wake_up_event,
@@ -1682,29 +1691,28 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1682 1691
1683 if (ret < 0) 1692 if (ret < 0)
1684 wl1271_error("suspend: set wake up conditions failed: %d", ret); 1693 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1685
1686out_sleep:
1687 wl1271_ps_elp_sleep(wl);
1688out: 1694out:
1689 return ret; 1695 return ret;
1690 1696
1691} 1697}
1692 1698
1693static int wl1271_configure_suspend_ap(struct wl1271 *wl, 1699static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1694 struct wl12xx_vif *wlvif) 1700 struct wl12xx_vif *wlvif,
1701 struct cfg80211_wowlan *wow)
1695{ 1702{
1696 int ret = 0; 1703 int ret = 0;
1697 1704
1698 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) 1705 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1699 goto out; 1706 goto out;
1700 1707
1701 ret = wl1271_ps_elp_wakeup(wl); 1708 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1702 if (ret < 0) 1709 if (ret < 0)
1703 goto out; 1710 goto out;
1704 1711
1705 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); 1712 ret = wl1271_configure_wowlan(wl, wow);
1713 if (ret < 0)
1714 goto out;
1706 1715
1707 wl1271_ps_elp_sleep(wl);
1708out: 1716out:
1709 return ret; 1717 return ret;
1710 1718
@@ -1717,7 +1725,7 @@ static int wl1271_configure_suspend(struct wl1271 *wl,
1717 if (wlvif->bss_type == BSS_TYPE_STA_BSS) 1725 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1718 return wl1271_configure_suspend_sta(wl, wlvif, wow); 1726 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1719 if (wlvif->bss_type == BSS_TYPE_AP_BSS) 1727 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1720 return wl1271_configure_suspend_ap(wl, wlvif); 1728 return wl1271_configure_suspend_ap(wl, wlvif, wow);
1721 return 0; 1729 return 0;
1722} 1730}
1723 1731
@@ -1730,21 +1738,18 @@ static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1730 if ((!is_ap) && (!is_sta)) 1738 if ((!is_ap) && (!is_sta))
1731 return; 1739 return;
1732 1740
1733 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 1741 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1742 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1734 return; 1743 return;
1735 1744
1736 ret = wl1271_ps_elp_wakeup(wl); 1745 wl1271_configure_wowlan(wl, NULL);
1737 if (ret < 0)
1738 return;
1739 1746
1740 if (is_sta) { 1747 if (is_sta) {
1741 wl1271_configure_wowlan(wl, NULL);
1742
1743 if ((wl->conf.conn.suspend_wake_up_event == 1748 if ((wl->conf.conn.suspend_wake_up_event ==
1744 wl->conf.conn.wake_up_event) && 1749 wl->conf.conn.wake_up_event) &&
1745 (wl->conf.conn.suspend_listen_interval == 1750 (wl->conf.conn.suspend_listen_interval ==
1746 wl->conf.conn.listen_interval)) 1751 wl->conf.conn.listen_interval))
1747 goto out_sleep; 1752 return;
1748 1753
1749 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1754 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1750 wl->conf.conn.wake_up_event, 1755 wl->conf.conn.wake_up_event,
@@ -1757,9 +1762,6 @@ static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1757 } else if (is_ap) { 1762 } else if (is_ap) {
1758 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false); 1763 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1759 } 1764 }
1760
1761out_sleep:
1762 wl1271_ps_elp_sleep(wl);
1763} 1765}
1764 1766
1765static int wl1271_op_suspend(struct ieee80211_hw *hw, 1767static int wl1271_op_suspend(struct ieee80211_hw *hw,
@@ -1781,6 +1783,13 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1781 wl1271_tx_flush(wl); 1783 wl1271_tx_flush(wl);
1782 1784
1783 mutex_lock(&wl->mutex); 1785 mutex_lock(&wl->mutex);
1786
1787 ret = wl1271_ps_elp_wakeup(wl);
1788 if (ret < 0) {
1789 mutex_unlock(&wl->mutex);
1790 return ret;
1791 }
1792
1784 wl->wow_enabled = true; 1793 wl->wow_enabled = true;
1785 wl12xx_for_each_wlvif(wl, wlvif) { 1794 wl12xx_for_each_wlvif(wl, wlvif) {
1786 ret = wl1271_configure_suspend(wl, wlvif, wow); 1795 ret = wl1271_configure_suspend(wl, wlvif, wow);
@@ -1790,7 +1799,27 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1790 return ret; 1799 return ret;
1791 } 1800 }
1792 } 1801 }
1802
1803 /* disable fast link flow control notifications from FW */
1804 ret = wlcore_hw_interrupt_notify(wl, false);
1805 if (ret < 0)
1806 goto out_sleep;
1807
1808 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1809 ret = wlcore_hw_rx_ba_filter(wl,
1810 !!wl->conf.conn.suspend_rx_ba_activity);
1811 if (ret < 0)
1812 goto out_sleep;
1813
1814out_sleep:
1815 wl1271_ps_elp_sleep(wl);
1793 mutex_unlock(&wl->mutex); 1816 mutex_unlock(&wl->mutex);
1817
1818 if (ret < 0) {
1819 wl1271_warning("couldn't prepare device to suspend");
1820 return ret;
1821 }
1822
1794 /* flush any remaining work */ 1823 /* flush any remaining work */
1795 wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); 1824 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1796 1825
@@ -1864,13 +1893,29 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
1864 if (pending_recovery) { 1893 if (pending_recovery) {
1865 wl1271_warning("queuing forgotten recovery on resume"); 1894 wl1271_warning("queuing forgotten recovery on resume");
1866 ieee80211_queue_work(wl->hw, &wl->recovery_work); 1895 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1867 goto out; 1896 goto out_sleep;
1868 } 1897 }
1869 1898
1899 ret = wl1271_ps_elp_wakeup(wl);
1900 if (ret < 0)
1901 goto out;
1902
1870 wl12xx_for_each_wlvif(wl, wlvif) { 1903 wl12xx_for_each_wlvif(wl, wlvif) {
1871 wl1271_configure_resume(wl, wlvif); 1904 wl1271_configure_resume(wl, wlvif);
1872 } 1905 }
1873 1906
1907 ret = wlcore_hw_interrupt_notify(wl, true);
1908 if (ret < 0)
1909 goto out_sleep;
1910
1911 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1912 ret = wlcore_hw_rx_ba_filter(wl, false);
1913 if (ret < 0)
1914 goto out_sleep;
1915
1916out_sleep:
1917 wl1271_ps_elp_sleep(wl);
1918
1874out: 1919out:
1875 wl->wow_enabled = false; 1920 wl->wow_enabled = false;
1876 1921
@@ -2279,6 +2324,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2279 wl1271_rx_streaming_enable_work); 2324 wl1271_rx_streaming_enable_work);
2280 INIT_WORK(&wlvif->rx_streaming_disable_work, 2325 INIT_WORK(&wlvif->rx_streaming_disable_work,
2281 wl1271_rx_streaming_disable_work); 2326 wl1271_rx_streaming_disable_work);
2327 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2282 INIT_DELAYED_WORK(&wlvif->channel_switch_work, 2328 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2283 wlcore_channel_switch_work); 2329 wlcore_channel_switch_work);
2284 INIT_DELAYED_WORK(&wlvif->connection_loss_work, 2330 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
@@ -2508,6 +2554,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2508 } 2554 }
2509 2555
2510 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | 2556 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2557 IEEE80211_VIF_SUPPORTS_UAPSD |
2511 IEEE80211_VIF_SUPPORTS_CQM_RSSI; 2558 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2512 2559
2513 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", 2560 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
@@ -2723,6 +2770,7 @@ unlock:
2723 del_timer_sync(&wlvif->rx_streaming_timer); 2770 del_timer_sync(&wlvif->rx_streaming_timer);
2724 cancel_work_sync(&wlvif->rx_streaming_enable_work); 2771 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2725 cancel_work_sync(&wlvif->rx_streaming_disable_work); 2772 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2773 cancel_work_sync(&wlvif->rc_update_work);
2726 cancel_delayed_work_sync(&wlvif->connection_loss_work); 2774 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2727 cancel_delayed_work_sync(&wlvif->channel_switch_work); 2775 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2728 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work); 2776 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
@@ -4072,8 +4120,14 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4072 ret = wlcore_set_beacon_template(wl, vif, is_ap); 4120 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4073 if (ret < 0) 4121 if (ret < 0)
4074 goto out; 4122 goto out;
4075 }
4076 4123
4124 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4125 &wlvif->flags)) {
4126 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4127 if (ret < 0)
4128 goto out;
4129 }
4130 }
4077out: 4131out:
4078 if (ret != 0) 4132 if (ret != 0)
4079 wl1271_error("beacon info change failed: %d", ret); 4133 wl1271_error("beacon info change failed: %d", ret);
@@ -4574,10 +4628,46 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4574 struct ieee80211_chanctx_conf *ctx, 4628 struct ieee80211_chanctx_conf *ctx,
4575 u32 changed) 4629 u32 changed)
4576{ 4630{
4631 struct wl1271 *wl = hw->priv;
4632 struct wl12xx_vif *wlvif;
4633 int ret;
4634 int channel = ieee80211_frequency_to_channel(
4635 ctx->def.chan->center_freq);
4636
4577 wl1271_debug(DEBUG_MAC80211, 4637 wl1271_debug(DEBUG_MAC80211,
4578 "mac80211 change chanctx %d (type %d) changed 0x%x", 4638 "mac80211 change chanctx %d (type %d) changed 0x%x",
4579 ieee80211_frequency_to_channel(ctx->def.chan->center_freq), 4639 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4580 cfg80211_get_chandef_type(&ctx->def), changed); 4640
4641 mutex_lock(&wl->mutex);
4642
4643 ret = wl1271_ps_elp_wakeup(wl);
4644 if (ret < 0)
4645 goto out;
4646
4647 wl12xx_for_each_wlvif(wl, wlvif) {
4648 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4649
4650 rcu_read_lock();
4651 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4652 rcu_read_unlock();
4653 continue;
4654 }
4655 rcu_read_unlock();
4656
4657 /* start radar if needed */
4658 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4659 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4660 ctx->radar_enabled && !wlvif->radar_enabled &&
4661 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4662 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4663 wlcore_hw_set_cac(wl, wlvif, true);
4664 wlvif->radar_enabled = true;
4665 }
4666 }
4667
4668 wl1271_ps_elp_sleep(wl);
4669out:
4670 mutex_unlock(&wl->mutex);
4581} 4671}
4582 4672
4583static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw, 4673static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
@@ -4588,13 +4678,26 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4588 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4678 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4589 int channel = ieee80211_frequency_to_channel( 4679 int channel = ieee80211_frequency_to_channel(
4590 ctx->def.chan->center_freq); 4680 ctx->def.chan->center_freq);
4681 int ret = -EINVAL;
4591 4682
4592 wl1271_debug(DEBUG_MAC80211, 4683 wl1271_debug(DEBUG_MAC80211,
4593 "mac80211 assign chanctx (role %d) %d (type %d)", 4684 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4594 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def)); 4685 wlvif->role_id, channel,
4686 cfg80211_get_chandef_type(&ctx->def),
4687 ctx->radar_enabled, ctx->def.chan->dfs_state);
4595 4688
4596 mutex_lock(&wl->mutex); 4689 mutex_lock(&wl->mutex);
4597 4690
4691 if (unlikely(wl->state != WLCORE_STATE_ON))
4692 goto out;
4693
4694 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4695 goto out;
4696
4697 ret = wl1271_ps_elp_wakeup(wl);
4698 if (ret < 0)
4699 goto out;
4700
4598 wlvif->band = ctx->def.chan->band; 4701 wlvif->band = ctx->def.chan->band;
4599 wlvif->channel = channel; 4702 wlvif->channel = channel;
4600 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def); 4703 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
@@ -4602,6 +4705,15 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4602 /* update default rates according to the band */ 4705 /* update default rates according to the band */
4603 wl1271_set_band_rate(wl, wlvif); 4706 wl1271_set_band_rate(wl, wlvif);
4604 4707
4708 if (ctx->radar_enabled &&
4709 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4710 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4711 wlcore_hw_set_cac(wl, wlvif, true);
4712 wlvif->radar_enabled = true;
4713 }
4714
4715 wl1271_ps_elp_sleep(wl);
4716out:
4605 mutex_unlock(&wl->mutex); 4717 mutex_unlock(&wl->mutex);
4606 4718
4607 return 0; 4719 return 0;
@@ -4613,6 +4725,7 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4613{ 4725{
4614 struct wl1271 *wl = hw->priv; 4726 struct wl1271 *wl = hw->priv;
4615 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4727 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4728 int ret;
4616 4729
4617 wl1271_debug(DEBUG_MAC80211, 4730 wl1271_debug(DEBUG_MAC80211,
4618 "mac80211 unassign chanctx (role %d) %d (type %d)", 4731 "mac80211 unassign chanctx (role %d) %d (type %d)",
@@ -4621,6 +4734,99 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4621 cfg80211_get_chandef_type(&ctx->def)); 4734 cfg80211_get_chandef_type(&ctx->def));
4622 4735
4623 wl1271_tx_flush(wl); 4736 wl1271_tx_flush(wl);
4737
4738 mutex_lock(&wl->mutex);
4739
4740 if (unlikely(wl->state != WLCORE_STATE_ON))
4741 goto out;
4742
4743 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4744 goto out;
4745
4746 ret = wl1271_ps_elp_wakeup(wl);
4747 if (ret < 0)
4748 goto out;
4749
4750 if (wlvif->radar_enabled) {
4751 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4752 wlcore_hw_set_cac(wl, wlvif, false);
4753 wlvif->radar_enabled = false;
4754 }
4755
4756 wl1271_ps_elp_sleep(wl);
4757out:
4758 mutex_unlock(&wl->mutex);
4759}
4760
4761static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4762 struct wl12xx_vif *wlvif,
4763 struct ieee80211_chanctx_conf *new_ctx)
4764{
4765 int channel = ieee80211_frequency_to_channel(
4766 new_ctx->def.chan->center_freq);
4767
4768 wl1271_debug(DEBUG_MAC80211,
4769 "switch vif (role %d) %d -> %d chan_type: %d",
4770 wlvif->role_id, wlvif->channel, channel,
4771 cfg80211_get_chandef_type(&new_ctx->def));
4772
4773 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4774 return 0;
4775
4776 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4777
4778 if (wlvif->radar_enabled) {
4779 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4780 wlcore_hw_set_cac(wl, wlvif, false);
4781 wlvif->radar_enabled = false;
4782 }
4783
4784 wlvif->band = new_ctx->def.chan->band;
4785 wlvif->channel = channel;
4786 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4787
4788 /* start radar if needed */
4789 if (new_ctx->radar_enabled) {
4790 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4791 wlcore_hw_set_cac(wl, wlvif, true);
4792 wlvif->radar_enabled = true;
4793 }
4794
4795 return 0;
4796}
4797
4798static int
4799wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4800 struct ieee80211_vif_chanctx_switch *vifs,
4801 int n_vifs,
4802 enum ieee80211_chanctx_switch_mode mode)
4803{
4804 struct wl1271 *wl = hw->priv;
4805 int i, ret;
4806
4807 wl1271_debug(DEBUG_MAC80211,
4808 "mac80211 switch chanctx n_vifs %d mode %d",
4809 n_vifs, mode);
4810
4811 mutex_lock(&wl->mutex);
4812
4813 ret = wl1271_ps_elp_wakeup(wl);
4814 if (ret < 0)
4815 goto out;
4816
4817 for (i = 0; i < n_vifs; i++) {
4818 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4819
4820 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4821 if (ret)
4822 goto out_sleep;
4823 }
4824out_sleep:
4825 wl1271_ps_elp_sleep(wl);
4826out:
4827 mutex_unlock(&wl->mutex);
4828
4829 return 0;
4624} 4830}
4625 4831
4626static int wl1271_op_conf_tx(struct ieee80211_hw *hw, 4832static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
@@ -5228,6 +5434,83 @@ out:
5228 mutex_unlock(&wl->mutex); 5434 mutex_unlock(&wl->mutex);
5229} 5435}
5230 5436
5437static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5438 struct wl12xx_vif *wlvif,
5439 u8 eid)
5440{
5441 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5442 struct sk_buff *beacon =
5443 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5444
5445 if (!beacon)
5446 return NULL;
5447
5448 return cfg80211_find_ie(eid,
5449 beacon->data + ieoffset,
5450 beacon->len - ieoffset);
5451}
5452
5453static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5454 u8 *csa_count)
5455{
5456 const u8 *ie;
5457 const struct ieee80211_channel_sw_ie *ie_csa;
5458
5459 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5460 if (!ie)
5461 return -EINVAL;
5462
5463 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5464 *csa_count = ie_csa->count;
5465
5466 return 0;
5467}
5468
5469static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5470 struct ieee80211_vif *vif,
5471 struct cfg80211_chan_def *chandef)
5472{
5473 struct wl1271 *wl = hw->priv;
5474 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5475 struct ieee80211_channel_switch ch_switch = {
5476 .block_tx = true,
5477 .chandef = *chandef,
5478 };
5479 int ret;
5480
5481 wl1271_debug(DEBUG_MAC80211,
5482 "mac80211 channel switch beacon (role %d)",
5483 wlvif->role_id);
5484
5485 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5486 if (ret < 0) {
5487 wl1271_error("error getting beacon (for CSA counter)");
5488 return;
5489 }
5490
5491 mutex_lock(&wl->mutex);
5492
5493 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5494 ret = -EBUSY;
5495 goto out;
5496 }
5497
5498 ret = wl1271_ps_elp_wakeup(wl);
5499 if (ret < 0)
5500 goto out;
5501
5502 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5503 if (ret)
5504 goto out_sleep;
5505
5506 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5507
5508out_sleep:
5509 wl1271_ps_elp_sleep(wl);
5510out:
5511 mutex_unlock(&wl->mutex);
5512}
5513
5231static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 5514static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5232 u32 queues, bool drop) 5515 u32 queues, bool drop)
5233{ 5516{
@@ -5370,19 +5653,26 @@ static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5370 u32 changed) 5653 u32 changed)
5371{ 5654{
5372 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 5655 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5373 struct wl1271 *wl = hw->priv;
5374 5656
5375 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed); 5657 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5658
5659 if (!(changed & IEEE80211_RC_BW_CHANGED))
5660 return;
5661
5662 /* this callback is atomic, so schedule a new work */
5663 wlvif->rc_update_bw = sta->bandwidth;
5664 ieee80211_queue_work(hw, &wlvif->rc_update_work);
5376} 5665}
5377 5666
5378static int wlcore_op_get_rssi(struct ieee80211_hw *hw, 5667static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5379 struct ieee80211_vif *vif, 5668 struct ieee80211_vif *vif,
5380 struct ieee80211_sta *sta, 5669 struct ieee80211_sta *sta,
5381 s8 *rssi_dbm) 5670 struct station_info *sinfo)
5382{ 5671{
5383 struct wl1271 *wl = hw->priv; 5672 struct wl1271 *wl = hw->priv;
5384 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 5673 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5385 int ret = 0; 5674 s8 rssi_dbm;
5675 int ret;
5386 5676
5387 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi"); 5677 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5388 5678
@@ -5395,17 +5685,18 @@ static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5395 if (ret < 0) 5685 if (ret < 0)
5396 goto out_sleep; 5686 goto out_sleep;
5397 5687
5398 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm); 5688 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5399 if (ret < 0) 5689 if (ret < 0)
5400 goto out_sleep; 5690 goto out_sleep;
5401 5691
5692 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5693 sinfo->signal = rssi_dbm;
5694
5402out_sleep: 5695out_sleep:
5403 wl1271_ps_elp_sleep(wl); 5696 wl1271_ps_elp_sleep(wl);
5404 5697
5405out: 5698out:
5406 mutex_unlock(&wl->mutex); 5699 mutex_unlock(&wl->mutex);
5407
5408 return ret;
5409} 5700}
5410 5701
5411static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw) 5702static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
@@ -5596,6 +5887,7 @@ static const struct ieee80211_ops wl1271_ops = {
5596 .set_bitrate_mask = wl12xx_set_bitrate_mask, 5887 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5597 .set_default_unicast_key = wl1271_op_set_default_key_idx, 5888 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5598 .channel_switch = wl12xx_op_channel_switch, 5889 .channel_switch = wl12xx_op_channel_switch,
5890 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
5599 .flush = wlcore_op_flush, 5891 .flush = wlcore_op_flush,
5600 .remain_on_channel = wlcore_op_remain_on_channel, 5892 .remain_on_channel = wlcore_op_remain_on_channel,
5601 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel, 5893 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
@@ -5604,8 +5896,9 @@ static const struct ieee80211_ops wl1271_ops = {
5604 .change_chanctx = wlcore_op_change_chanctx, 5896 .change_chanctx = wlcore_op_change_chanctx,
5605 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx, 5897 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5606 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx, 5898 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5899 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5607 .sta_rc_update = wlcore_op_sta_rc_update, 5900 .sta_rc_update = wlcore_op_sta_rc_update,
5608 .get_rssi = wlcore_op_get_rssi, 5901 .sta_statistics = wlcore_op_sta_statistics,
5609 CFG80211_TESTMODE_CMD(wl1271_tm_cmd) 5902 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5610}; 5903};
5611 5904
@@ -5776,7 +6069,6 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5776 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 6069 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5777 IEEE80211_HW_SUPPORTS_PS | 6070 IEEE80211_HW_SUPPORTS_PS |
5778 IEEE80211_HW_SUPPORTS_DYNAMIC_PS | 6071 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5779 IEEE80211_HW_SUPPORTS_UAPSD |
5780 IEEE80211_HW_HAS_RATE_CONTROL | 6072 IEEE80211_HW_HAS_RATE_CONTROL |
5781 IEEE80211_HW_CONNECTION_MONITOR | 6073 IEEE80211_HW_CONNECTION_MONITOR |
5782 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 6074 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
@@ -5811,7 +6103,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5811 6103
5812 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD | 6104 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5813 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | 6105 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
5814 WIPHY_FLAG_SUPPORTS_SCHED_SCAN; 6106 WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
6107 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
5815 6108
5816 /* make sure all our channels fit in the scanned_ch bitmask */ 6109 /* make sure all our channels fit in the scanned_ch bitmask */
5817 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) + 6110 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index b52516eed7b2..4cd316e61466 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -56,9 +56,6 @@ void wl1271_elp_work(struct work_struct *work)
56 goto out; 56 goto out;
57 57
58 wl12xx_for_each_wlvif(wl, wlvif) { 58 wl12xx_for_each_wlvif(wl, wlvif) {
59 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
60 goto out;
61
62 if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) && 59 if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
63 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) 60 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
64 goto out; 61 goto out;
@@ -95,9 +92,6 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
95 return; 92 return;
96 93
97 wl12xx_for_each_wlvif(wl, wlvif) { 94 wl12xx_for_each_wlvif(wl, wlvif) {
98 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
99 return;
100
101 if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) && 95 if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
102 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) 96 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
103 return; 97 return;
@@ -108,6 +102,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
108 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, 102 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
109 msecs_to_jiffies(timeout)); 103 msecs_to_jiffies(timeout));
110} 104}
105EXPORT_SYMBOL_GPL(wl1271_ps_elp_sleep);
111 106
112int wl1271_ps_elp_wakeup(struct wl1271 *wl) 107int wl1271_ps_elp_wakeup(struct wl1271 *wl)
113{ 108{
@@ -175,6 +170,7 @@ err:
175out: 170out:
176 return 0; 171 return 0;
177} 172}
173EXPORT_SYMBOL_GPL(wl1271_ps_elp_wakeup);
178 174
179int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, 175int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
180 enum wl1271_cmd_ps_mode mode) 176 enum wl1271_cmd_ps_mode mode)
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index ad86a48dcfcb..fd4e9ba176c9 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -21,7 +21,7 @@ static const
21struct nla_policy wlcore_vendor_attr_policy[NUM_WLCORE_VENDOR_ATTR] = { 21struct nla_policy wlcore_vendor_attr_policy[NUM_WLCORE_VENDOR_ATTR] = {
22 [WLCORE_VENDOR_ATTR_FREQ] = { .type = NLA_U32 }, 22 [WLCORE_VENDOR_ATTR_FREQ] = { .type = NLA_U32 },
23 [WLCORE_VENDOR_ATTR_GROUP_ID] = { .type = NLA_U32 }, 23 [WLCORE_VENDOR_ATTR_GROUP_ID] = { .type = NLA_U32 },
24 [WLCORE_VENDOR_ATTR_GROUP_KEY] = { .type = NLA_U32, 24 [WLCORE_VENDOR_ATTR_GROUP_KEY] = { .type = NLA_BINARY,
25 .len = WLAN_MAX_KEY_LEN }, 25 .len = WLAN_MAX_KEY_LEN },
26}; 26};
27 27
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index df78cf12ef15..d599c869e6e8 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -106,8 +106,7 @@ struct wlcore_ops {
106 struct wl12xx_vif *wlvif, 106 struct wl12xx_vif *wlvif,
107 struct ieee80211_channel_switch *ch_switch); 107 struct ieee80211_channel_switch *ch_switch);
108 u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len); 108 u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len);
109 void (*sta_rc_update)(struct wl1271 *wl, struct wl12xx_vif *wlvif, 109 void (*sta_rc_update)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
110 struct ieee80211_sta *sta, u32 changed);
111 int (*set_peer_cap)(struct wl1271 *wl, 110 int (*set_peer_cap)(struct wl1271 *wl,
112 struct ieee80211_sta_ht_cap *ht_cap, 111 struct ieee80211_sta_ht_cap *ht_cap,
113 bool allow_ht_operation, 112 bool allow_ht_operation,
@@ -117,10 +116,16 @@ struct wlcore_ops {
117 struct wl1271_link *lnk); 116 struct wl1271_link *lnk);
118 bool (*lnk_low_prio)(struct wl1271 *wl, u8 hlid, 117 bool (*lnk_low_prio)(struct wl1271 *wl, u8 hlid,
119 struct wl1271_link *lnk); 118 struct wl1271_link *lnk);
119 int (*interrupt_notify)(struct wl1271 *wl, bool action);
120 int (*rx_ba_filter)(struct wl1271 *wl, bool action);
121 int (*ap_sleep)(struct wl1271 *wl);
120 int (*smart_config_start)(struct wl1271 *wl, u32 group_bitmap); 122 int (*smart_config_start)(struct wl1271 *wl, u32 group_bitmap);
121 int (*smart_config_stop)(struct wl1271 *wl); 123 int (*smart_config_stop)(struct wl1271 *wl);
122 int (*smart_config_set_group_key)(struct wl1271 *wl, u16 group_id, 124 int (*smart_config_set_group_key)(struct wl1271 *wl, u16 group_id,
123 u8 key_len, u8 *key); 125 u8 key_len, u8 *key);
126 int (*set_cac)(struct wl1271 *wl, struct wl12xx_vif *wlvif,
127 bool start);
128 int (*dfs_master_restart)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
124}; 129};
125 130
126enum wlcore_partitions { 131enum wlcore_partitions {
@@ -460,6 +465,9 @@ struct wl1271 {
460 /* HW HT (11n) capabilities */ 465 /* HW HT (11n) capabilities */
461 struct ieee80211_sta_ht_cap ht_cap[WLCORE_NUM_BANDS]; 466 struct ieee80211_sta_ht_cap ht_cap[WLCORE_NUM_BANDS];
462 467
468 /* the current dfs region */
469 enum nl80211_dfs_regions dfs_region;
470
463 /* size of the private FW status data */ 471 /* size of the private FW status data */
464 size_t fw_status_len; 472 size_t fw_status_len;
465 size_t fw_status_priv_len; 473 size_t fw_status_priv_len;
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 0e52556044d9..3396ce5a934d 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -251,6 +251,7 @@ enum wl12xx_vif_flags {
251 WLVIF_FLAG_AP_PROBE_RESP_SET, 251 WLVIF_FLAG_AP_PROBE_RESP_SET,
252 WLVIF_FLAG_IN_USE, 252 WLVIF_FLAG_IN_USE,
253 WLVIF_FLAG_ACTIVE, 253 WLVIF_FLAG_ACTIVE,
254 WLVIF_FLAG_BEACON_DISABLED,
254}; 255};
255 256
256struct wl12xx_vif; 257struct wl12xx_vif;
@@ -434,6 +435,8 @@ struct wl12xx_vif {
434 435
435 bool wmm_enabled; 436 bool wmm_enabled;
436 437
438 bool radar_enabled;
439
437 /* Rx Streaming */ 440 /* Rx Streaming */
438 struct work_struct rx_streaming_enable_work; 441 struct work_struct rx_streaming_enable_work;
439 struct work_struct rx_streaming_disable_work; 442 struct work_struct rx_streaming_disable_work;
@@ -463,6 +466,10 @@ struct wl12xx_vif {
463 /* work for canceling ROC after pending auth reply */ 466 /* work for canceling ROC after pending auth reply */
464 struct delayed_work pending_auth_complete_work; 467 struct delayed_work pending_auth_complete_work;
465 468
469 /* update rate conrol */
470 enum ieee80211_sta_rx_bandwidth rc_update_bw;
471 struct work_struct rc_update_work;
472
466 /* 473 /*
467 * total freed FW packets on the link. 474 * total freed FW packets on the link.
468 * For STA this holds the PN of the link to the AP. 475 * For STA this holds the PN of the link to the AP.
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 5f1fda44882b..589fa256256b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -251,7 +251,6 @@ struct xenvif {
251struct xenvif_rx_cb { 251struct xenvif_rx_cb {
252 unsigned long expires; 252 unsigned long expires;
253 int meta_slots_used; 253 int meta_slots_used;
254 bool full_coalesce;
255}; 254};
256 255
257#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb) 256#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 12f9e2708afb..f38227afe099 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -80,7 +80,7 @@ static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
80 return IRQ_HANDLED; 80 return IRQ_HANDLED;
81} 81}
82 82
83int xenvif_poll(struct napi_struct *napi, int budget) 83static int xenvif_poll(struct napi_struct *napi, int budget)
84{ 84{
85 struct xenvif_queue *queue = 85 struct xenvif_queue *queue =
86 container_of(napi, struct xenvif_queue, napi); 86 container_of(napi, struct xenvif_queue, napi);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7dc2d64db3cb..f7a31d2cb3f1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -233,51 +233,6 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
233 } 233 }
234} 234}
235 235
236/*
237 * Returns true if we should start a new receive buffer instead of
238 * adding 'size' bytes to a buffer which currently contains 'offset'
239 * bytes.
240 */
241static bool start_new_rx_buffer(int offset, unsigned long size, int head,
242 bool full_coalesce)
243{
244 /* simple case: we have completely filled the current buffer. */
245 if (offset == MAX_BUFFER_OFFSET)
246 return true;
247
248 /*
249 * complex case: start a fresh buffer if the current frag
250 * would overflow the current buffer but only if:
251 * (i) this frag would fit completely in the next buffer
252 * and (ii) there is already some data in the current buffer
253 * and (iii) this is not the head buffer.
254 * and (iv) there is no need to fully utilize the buffers
255 *
256 * Where:
257 * - (i) stops us splitting a frag into two copies
258 * unless the frag is too large for a single buffer.
259 * - (ii) stops us from leaving a buffer pointlessly empty.
260 * - (iii) stops us leaving the first buffer
261 * empty. Strictly speaking this is already covered
262 * by (ii) but is explicitly checked because
263 * netfront relies on the first buffer being
264 * non-empty and can crash otherwise.
265 * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
266 * slot
267 *
268 * This means we will effectively linearise small
269 * frags but do not needlessly split large buffers
270 * into multiple copies tend to give large frags their
271 * own buffers as before.
272 */
273 BUG_ON(size > MAX_BUFFER_OFFSET);
274 if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
275 !full_coalesce)
276 return true;
277
278 return false;
279}
280
281struct netrx_pending_operations { 236struct netrx_pending_operations {
282 unsigned copy_prod, copy_cons; 237 unsigned copy_prod, copy_cons;
283 unsigned meta_prod, meta_cons; 238 unsigned meta_prod, meta_cons;
@@ -336,24 +291,13 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
336 BUG_ON(offset >= PAGE_SIZE); 291 BUG_ON(offset >= PAGE_SIZE);
337 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); 292 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
338 293
339 bytes = PAGE_SIZE - offset; 294 if (npo->copy_off == MAX_BUFFER_OFFSET)
295 meta = get_next_rx_buffer(queue, npo);
340 296
297 bytes = PAGE_SIZE - offset;
341 if (bytes > size) 298 if (bytes > size)
342 bytes = size; 299 bytes = size;
343 300
344 if (start_new_rx_buffer(npo->copy_off,
345 bytes,
346 *head,
347 XENVIF_RX_CB(skb)->full_coalesce)) {
348 /*
349 * Netfront requires there to be some data in the head
350 * buffer.
351 */
352 BUG_ON(*head);
353
354 meta = get_next_rx_buffer(queue, npo);
355 }
356
357 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) 301 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
358 bytes = MAX_BUFFER_OFFSET - npo->copy_off; 302 bytes = MAX_BUFFER_OFFSET - npo->copy_off;
359 303
@@ -570,60 +514,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
570 514
571 while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX) 515 while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
572 && (skb = xenvif_rx_dequeue(queue)) != NULL) { 516 && (skb = xenvif_rx_dequeue(queue)) != NULL) {
573 RING_IDX max_slots_needed;
574 RING_IDX old_req_cons; 517 RING_IDX old_req_cons;
575 RING_IDX ring_slots_used; 518 RING_IDX ring_slots_used;
576 int i;
577 519
578 queue->last_rx_time = jiffies; 520 queue->last_rx_time = jiffies;
579 521
580 /* We need a cheap worse case estimate for the number of
581 * slots we'll use.
582 */
583
584 max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
585 skb_headlen(skb),
586 PAGE_SIZE);
587 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
588 unsigned int size;
589 unsigned int offset;
590
591 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
592 offset = skb_shinfo(skb)->frags[i].page_offset;
593
594 /* For a worse-case estimate we need to factor in
595 * the fragment page offset as this will affect the
596 * number of times xenvif_gop_frag_copy() will
597 * call start_new_rx_buffer().
598 */
599 max_slots_needed += DIV_ROUND_UP(offset + size,
600 PAGE_SIZE);
601 }
602
603 /* To avoid the estimate becoming too pessimal for some
604 * frontends that limit posted rx requests, cap the estimate
605 * at MAX_SKB_FRAGS. In this case netback will fully coalesce
606 * the skb into the provided slots.
607 */
608 if (max_slots_needed > MAX_SKB_FRAGS) {
609 max_slots_needed = MAX_SKB_FRAGS;
610 XENVIF_RX_CB(skb)->full_coalesce = true;
611 } else {
612 XENVIF_RX_CB(skb)->full_coalesce = false;
613 }
614
615 /* We may need one more slot for GSO metadata */
616 if (skb_is_gso(skb) &&
617 (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
618 skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
619 max_slots_needed++;
620
621 old_req_cons = queue->rx.req_cons; 522 old_req_cons = queue->rx.req_cons;
622 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue); 523 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
623 ring_slots_used = queue->rx.req_cons - old_req_cons; 524 ring_slots_used = queue->rx.req_cons - old_req_cons;
624 525
625 BUG_ON(ring_slots_used > max_slots_needed);
626
627 __skb_queue_tail(&rxq, skb); 526 __skb_queue_tail(&rxq, skb);
628 } 527 }
629 528
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d8c10764f130..e9b960f0ff32 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -142,10 +142,6 @@ struct netfront_queue {
142 struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; 142 struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
143 grant_ref_t gref_rx_head; 143 grant_ref_t gref_rx_head;
144 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; 144 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
145
146 unsigned long rx_pfn_array[NET_RX_RING_SIZE];
147 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
148 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
149}; 145};
150 146
151struct netfront_info { 147struct netfront_info {
@@ -223,11 +219,7 @@ static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
223} 219}
224 220
225#ifdef CONFIG_SYSFS 221#ifdef CONFIG_SYSFS
226static int xennet_sysfs_addif(struct net_device *netdev); 222static const struct attribute_group xennet_dev_group;
227static void xennet_sysfs_delif(struct net_device *netdev);
228#else /* !CONFIG_SYSFS */
229#define xennet_sysfs_addif(dev) (0)
230#define xennet_sysfs_delif(dev) do { } while (0)
231#endif 223#endif
232 224
233static bool xennet_can_sg(struct net_device *dev) 225static bool xennet_can_sg(struct net_device *dev)
@@ -424,109 +416,68 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
424 xennet_maybe_wake_tx(queue); 416 xennet_maybe_wake_tx(queue);
425} 417}
426 418
427static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue, 419static struct xen_netif_tx_request *xennet_make_one_txreq(
428 struct xen_netif_tx_request *tx) 420 struct netfront_queue *queue, struct sk_buff *skb,
421 struct page *page, unsigned int offset, unsigned int len)
429{ 422{
430 char *data = skb->data;
431 unsigned long mfn;
432 RING_IDX prod = queue->tx.req_prod_pvt;
433 int frags = skb_shinfo(skb)->nr_frags;
434 unsigned int offset = offset_in_page(data);
435 unsigned int len = skb_headlen(skb);
436 unsigned int id; 423 unsigned int id;
424 struct xen_netif_tx_request *tx;
437 grant_ref_t ref; 425 grant_ref_t ref;
438 int i;
439 426
440 /* While the header overlaps a page boundary (including being 427 len = min_t(unsigned int, PAGE_SIZE - offset, len);
441 larger than a page), split it it into page-sized chunks. */
442 while (len > PAGE_SIZE - offset) {
443 tx->size = PAGE_SIZE - offset;
444 tx->flags |= XEN_NETTXF_more_data;
445 len -= tx->size;
446 data += tx->size;
447 offset = 0;
448 428
449 id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); 429 id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
450 queue->tx_skbs[id].skb = skb_get(skb); 430 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
451 tx = RING_GET_REQUEST(&queue->tx, prod++); 431 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
452 tx->id = id; 432 BUG_ON((signed short)ref < 0);
453 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
454 BUG_ON((signed short)ref < 0);
455 433
456 mfn = virt_to_mfn(data); 434 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
457 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, 435 page_to_mfn(page), GNTMAP_readonly);
458 mfn, GNTMAP_readonly);
459 436
460 queue->grant_tx_page[id] = virt_to_page(data); 437 queue->tx_skbs[id].skb = skb;
461 tx->gref = queue->grant_tx_ref[id] = ref; 438 queue->grant_tx_page[id] = page;
462 tx->offset = offset; 439 queue->grant_tx_ref[id] = ref;
463 tx->size = len;
464 tx->flags = 0;
465 }
466 440
467 /* Grant backend access to each skb fragment page. */ 441 tx->id = id;
468 for (i = 0; i < frags; i++) { 442 tx->gref = ref;
469 skb_frag_t *frag = skb_shinfo(skb)->frags + i; 443 tx->offset = offset;
470 struct page *page = skb_frag_page(frag); 444 tx->size = len;
445 tx->flags = 0;
471 446
472 len = skb_frag_size(frag); 447 return tx;
473 offset = frag->page_offset; 448}
474 449
475 /* Skip unused frames from start of page */ 450static struct xen_netif_tx_request *xennet_make_txreqs(
476 page += offset >> PAGE_SHIFT; 451 struct netfront_queue *queue, struct xen_netif_tx_request *tx,
477 offset &= ~PAGE_MASK; 452 struct sk_buff *skb, struct page *page,
453 unsigned int offset, unsigned int len)
454{
455 /* Skip unused frames from start of page */
456 page += offset >> PAGE_SHIFT;
457 offset &= ~PAGE_MASK;
478 458
479 while (len > 0) { 459 while (len) {
480 unsigned long bytes; 460 tx->flags |= XEN_NETTXF_more_data;
481 461 tx = xennet_make_one_txreq(queue, skb_get(skb),
482 bytes = PAGE_SIZE - offset; 462 page, offset, len);
483 if (bytes > len) 463 page++;
484 bytes = len; 464 offset = 0;
485 465 len -= tx->size;
486 tx->flags |= XEN_NETTXF_more_data;
487
488 id = get_id_from_freelist(&queue->tx_skb_freelist,
489 queue->tx_skbs);
490 queue->tx_skbs[id].skb = skb_get(skb);
491 tx = RING_GET_REQUEST(&queue->tx, prod++);
492 tx->id = id;
493 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
494 BUG_ON((signed short)ref < 0);
495
496 mfn = pfn_to_mfn(page_to_pfn(page));
497 gnttab_grant_foreign_access_ref(ref,
498 queue->info->xbdev->otherend_id,
499 mfn, GNTMAP_readonly);
500
501 queue->grant_tx_page[id] = page;
502 tx->gref = queue->grant_tx_ref[id] = ref;
503 tx->offset = offset;
504 tx->size = bytes;
505 tx->flags = 0;
506
507 offset += bytes;
508 len -= bytes;
509
510 /* Next frame */
511 if (offset == PAGE_SIZE && len) {
512 BUG_ON(!PageCompound(page));
513 page++;
514 offset = 0;
515 }
516 }
517 } 466 }
518 467
519 queue->tx.req_prod_pvt = prod; 468 return tx;
520} 469}
521 470
522/* 471/*
523 * Count how many ring slots are required to send the frags of this 472 * Count how many ring slots are required to send this skb. Each frag
524 * skb. Each frag might be a compound page. 473 * might be a compound page.
525 */ 474 */
526static int xennet_count_skb_frag_slots(struct sk_buff *skb) 475static int xennet_count_skb_slots(struct sk_buff *skb)
527{ 476{
528 int i, frags = skb_shinfo(skb)->nr_frags; 477 int i, frags = skb_shinfo(skb)->nr_frags;
529 int pages = 0; 478 int pages;
479
480 pages = PFN_UP(offset_in_page(skb->data) + skb_headlen(skb));
530 481
531 for (i = 0; i < frags; i++) { 482 for (i = 0; i < frags; i++) {
532 skb_frag_t *frag = skb_shinfo(skb)->frags + i; 483 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
@@ -562,18 +513,15 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
562 513
563static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) 514static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
564{ 515{
565 unsigned short id;
566 struct netfront_info *np = netdev_priv(dev); 516 struct netfront_info *np = netdev_priv(dev);
567 struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); 517 struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
568 struct xen_netif_tx_request *tx; 518 struct xen_netif_tx_request *tx, *first_tx;
569 char *data = skb->data; 519 unsigned int i;
570 RING_IDX i;
571 grant_ref_t ref;
572 unsigned long mfn;
573 int notify; 520 int notify;
574 int slots; 521 int slots;
575 unsigned int offset = offset_in_page(data); 522 struct page *page;
576 unsigned int len = skb_headlen(skb); 523 unsigned int offset;
524 unsigned int len;
577 unsigned long flags; 525 unsigned long flags;
578 struct netfront_queue *queue = NULL; 526 struct netfront_queue *queue = NULL;
579 unsigned int num_queues = dev->real_num_tx_queues; 527 unsigned int num_queues = dev->real_num_tx_queues;
@@ -596,18 +544,18 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
596 goto drop; 544 goto drop;
597 } 545 }
598 546
599 slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) + 547 slots = xennet_count_skb_slots(skb);
600 xennet_count_skb_frag_slots(skb);
601 if (unlikely(slots > MAX_SKB_FRAGS + 1)) { 548 if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
602 net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n", 549 net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
603 slots, skb->len); 550 slots, skb->len);
604 if (skb_linearize(skb)) 551 if (skb_linearize(skb))
605 goto drop; 552 goto drop;
606 data = skb->data;
607 offset = offset_in_page(data);
608 len = skb_headlen(skb);
609 } 553 }
610 554
555 page = virt_to_page(skb->data);
556 offset = offset_in_page(skb->data);
557 len = skb_headlen(skb);
558
611 spin_lock_irqsave(&queue->tx_lock, flags); 559 spin_lock_irqsave(&queue->tx_lock, flags);
612 560
613 if (unlikely(!netif_carrier_ok(dev) || 561 if (unlikely(!netif_carrier_ok(dev) ||
@@ -617,25 +565,13 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
617 goto drop; 565 goto drop;
618 } 566 }
619 567
620 i = queue->tx.req_prod_pvt; 568 /* First request for the linear area. */
621 569 first_tx = tx = xennet_make_one_txreq(queue, skb,
622 id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); 570 page, offset, len);
623 queue->tx_skbs[id].skb = skb; 571 page++;
624 572 offset = 0;
625 tx = RING_GET_REQUEST(&queue->tx, i); 573 len -= tx->size;
626 574
627 tx->id = id;
628 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
629 BUG_ON((signed short)ref < 0);
630 mfn = virt_to_mfn(data);
631 gnttab_grant_foreign_access_ref(
632 ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
633 queue->grant_tx_page[id] = virt_to_page(data);
634 tx->gref = queue->grant_tx_ref[id] = ref;
635 tx->offset = offset;
636 tx->size = len;
637
638 tx->flags = 0;
639 if (skb->ip_summed == CHECKSUM_PARTIAL) 575 if (skb->ip_summed == CHECKSUM_PARTIAL)
640 /* local packet? */ 576 /* local packet? */
641 tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; 577 tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
@@ -643,11 +579,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
643 /* remote but checksummed. */ 579 /* remote but checksummed. */
644 tx->flags |= XEN_NETTXF_data_validated; 580 tx->flags |= XEN_NETTXF_data_validated;
645 581
582 /* Optional extra info after the first request. */
646 if (skb_shinfo(skb)->gso_size) { 583 if (skb_shinfo(skb)->gso_size) {
647 struct xen_netif_extra_info *gso; 584 struct xen_netif_extra_info *gso;
648 585
649 gso = (struct xen_netif_extra_info *) 586 gso = (struct xen_netif_extra_info *)
650 RING_GET_REQUEST(&queue->tx, ++i); 587 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
651 588
652 tx->flags |= XEN_NETTXF_extra_info; 589 tx->flags |= XEN_NETTXF_extra_info;
653 590
@@ -662,10 +599,19 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
662 gso->flags = 0; 599 gso->flags = 0;
663 } 600 }
664 601
665 queue->tx.req_prod_pvt = i + 1; 602 /* Requests for the rest of the linear area. */
603 tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
604
605 /* Requests for all the frags. */
606 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
607 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
608 tx = xennet_make_txreqs(queue, tx, skb,
609 skb_frag_page(frag), frag->page_offset,
610 skb_frag_size(frag));
611 }
666 612
667 xennet_make_frags(skb, queue, tx); 613 /* First request has the packet length. */
668 tx->size = skb->len; 614 first_tx->size = skb->len;
669 615
670 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); 616 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
671 if (notify) 617 if (notify)
@@ -1367,20 +1313,15 @@ static int netfront_probe(struct xenbus_device *dev,
1367 1313
1368 info = netdev_priv(netdev); 1314 info = netdev_priv(netdev);
1369 dev_set_drvdata(&dev->dev, info); 1315 dev_set_drvdata(&dev->dev, info);
1370 1316#ifdef CONFIG_SYSFS
1317 info->netdev->sysfs_groups[0] = &xennet_dev_group;
1318#endif
1371 err = register_netdev(info->netdev); 1319 err = register_netdev(info->netdev);
1372 if (err) { 1320 if (err) {
1373 pr_warn("%s: register_netdev err=%d\n", __func__, err); 1321 pr_warn("%s: register_netdev err=%d\n", __func__, err);
1374 goto fail; 1322 goto fail;
1375 } 1323 }
1376 1324
1377 err = xennet_sysfs_addif(info->netdev);
1378 if (err) {
1379 unregister_netdev(info->netdev);
1380 pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
1381 goto fail;
1382 }
1383
1384 return 0; 1325 return 0;
1385 1326
1386 fail: 1327 fail:
@@ -2144,39 +2085,20 @@ static ssize_t store_rxbuf(struct device *dev,
2144 return len; 2085 return len;
2145} 2086}
2146 2087
2147static struct device_attribute xennet_attrs[] = { 2088static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
2148 __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf), 2089static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
2149 __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf), 2090static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);
2150 __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL),
2151};
2152
2153static int xennet_sysfs_addif(struct net_device *netdev)
2154{
2155 int i;
2156 int err;
2157
2158 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
2159 err = device_create_file(&netdev->dev,
2160 &xennet_attrs[i]);
2161 if (err)
2162 goto fail;
2163 }
2164 return 0;
2165
2166 fail:
2167 while (--i >= 0)
2168 device_remove_file(&netdev->dev, &xennet_attrs[i]);
2169 return err;
2170}
2171
2172static void xennet_sysfs_delif(struct net_device *netdev)
2173{
2174 int i;
2175 2091
2176 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) 2092static struct attribute *xennet_dev_attrs[] = {
2177 device_remove_file(&netdev->dev, &xennet_attrs[i]); 2093 &dev_attr_rxbuf_min.attr,
2178} 2094 &dev_attr_rxbuf_max.attr,
2095 &dev_attr_rxbuf_cur.attr,
2096 NULL
2097};
2179 2098
2099static const struct attribute_group xennet_dev_group = {
2100 .attrs = xennet_dev_attrs
2101};
2180#endif /* CONFIG_SYSFS */ 2102#endif /* CONFIG_SYSFS */
2181 2103
2182static int xennet_remove(struct xenbus_device *dev) 2104static int xennet_remove(struct xenbus_device *dev)
@@ -2190,8 +2112,6 @@ static int xennet_remove(struct xenbus_device *dev)
2190 2112
2191 xennet_disconnect_backend(info); 2113 xennet_disconnect_backend(info);
2192 2114
2193 xennet_sysfs_delif(info->netdev);
2194
2195 unregister_netdev(info->netdev); 2115 unregister_netdev(info->netdev);
2196 2116
2197 for (i = 0; i < num_queues; ++i) { 2117 for (i = 0; i < num_queues; ++i) {